vendor: Bump Ignition to 0.7.0, add Fuze

This commit is contained in:
Dalton Hubble
2016-06-22 11:19:26 -07:00
parent c27f6ad50d
commit 114a229ade
107 changed files with 14158 additions and 1525 deletions

12
glide.lock generated
View File

@@ -1,5 +1,5 @@
hash: e7ed76c4a5812e4c198db9d3f153cac04113225f58bb82966e4eca08386eda20
updated: 2016-05-12T16:50:32.137050044-07:00
hash: f3b21a59ce256ab9ea3bb3568d1238397d84993afa2c15fab5b30abb97c558e3
updated: 2016-06-23T16:06:01.323815777-07:00
imports:
- name: github.com/alecthomas/units
version: 2efee857e7cfd4f3d0138cc3cbb1b4966962b93a
@@ -11,6 +11,10 @@ imports:
version: b3f805dee6a4aa5ed298a1f370284df470eecf43
subpackages:
- config
- name: github.com/coreos/fuze
version: 60c987a0aba4976ac6cbc9350671c2fedc431e8b
subpackages:
- config
- name: github.com/coreos/go-semver
version: 294930c1e79c64e7dbe360054274fdad492c8cf5
subpackages:
@@ -20,7 +24,7 @@ imports:
subpackages:
- journal
- name: github.com/coreos/ignition
version: 44c274ab414294a8e34b3a940e0ec1afe6b6c610
version: b6850837b3b9bd17b673e58b5c406b5e4192ddca
subpackages:
- config
- config/types
@@ -37,6 +41,8 @@ imports:
version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
subpackages:
- spew
- name: github.com/go-yaml/yaml
version: a83829b6f1293c91addabc89d0571c246397bbf4
- name: github.com/golang/protobuf
version: 7cc19b78d562895b13596ddce7aafb59dd789318
subpackages:

View File

@@ -18,12 +18,13 @@ import:
subpackages:
- journal
- package: github.com/coreos/ignition
version: 44c274ab414294a8e34b3a940e0ec1afe6b6c610
version: b6850837b3b9bd17b673e58b5c406b5e4192ddca
subpackages:
- config
- package: github.com/coreos/fuze
version: 60c987a0aba4976ac6cbc9350671c2fedc431e8b
subpackages:
- config
- config/types
- config/v1
- config/v1/types
- package: github.com/coreos/pkg
version: 66fe44ad037ccb80329115cb4db0dbe8e9beb03a
subpackages:

3
vendor/github.com/coreos/fuze/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,3 @@
bin/
gopath/
*.swp

12
vendor/github.com/coreos/fuze/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,12 @@
language: go
matrix:
include:
- go: 1.5
env: GO15VENDOREXPERIMENT=1
- go: 1.6
install:
-
script:
- ./test

71
vendor/github.com/coreos/fuze/CONTRIBUTING.md generated vendored Normal file
View File

@@ -0,0 +1,71 @@
# How to Contribute
CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
GitHub pull requests. This document outlines some of the conventions on
development workflow, commit message formatting, contact points and other
resources to make it easier to get your contribution accepted.
# Certificate of Origin
By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution. See the [DCO](DCO) file for details.
# Email and Chat
The project currently uses the general CoreOS email list and IRC channel:
- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
Please avoid emailing maintainers found in the MAINTAINERS file directly. They
are very busy and read the mailing lists.
## Getting Started
- Fork the repository on GitHub
- Read the [README](README.md) for build and test instructions
- Play with the project, submit bugs, submit patches!
## Contribution Flow
This is a rough outline of what a contributor's workflow looks like:
- Create a topic branch from where you want to base your work (usually master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Make sure the tests pass, and add any new tests as appropriate.
- Submit a pull request to the original repository.
Thanks for your contributions!
### Format of the Commit Message
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.

36
vendor/github.com/coreos/fuze/DCO generated vendored Normal file
View File

@@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.

202
vendor/github.com/coreos/fuze/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

5
vendor/github.com/coreos/fuze/NOTICE generated vendored Normal file
View File

@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

5
vendor/github.com/coreos/fuze/README.md generated vendored Normal file
View File

@@ -0,0 +1,5 @@
# Fuze
Fuze is the utility responsible for transforming (fuzing) an
[Ignition](https://github.com/coreos/ignition) configuration from
human-friendly YAML into machine-friendly JSON for consumption.

18
vendor/github.com/coreos/fuze/build generated vendored Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash -eu
# Build script for fuze: constructs a throwaway GOPATH rooted in the
# repository (via a symlink) and compiles the binary into ./bin.
PROJ="fuze"
ORG_PATH="github.com/coreos"
REPO_PATH="${ORG_PATH}/${PROJ}"
# Symlink the repo into gopath/src so `go build` can resolve the import
# path; only create it if the symlink is not already present.
if [ ! -h gopath/src/${REPO_PATH} ]; then
mkdir -p gopath/src/${ORG_PATH}
ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
fi
# Install binaries into ./bin and resolve imports from the local gopath.
export GOBIN=${PWD}/bin
export GOPATH=${PWD}/gopath
eval $(go env)
echo "Building ${PROJ}..."
# The main package lives under the repo's internal/ directory.
go build -o ${GOBIN}/${PROJ} ${REPO_PATH}/internal

40
vendor/github.com/coreos/fuze/config/config.go generated vendored Normal file
View File

@@ -0,0 +1,40 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"reflect"
"github.com/coreos/ignition/config/types"
"github.com/go-yaml/yaml"
)
// ParseAsV2_0_0 interprets data as a fuze YAML document and converts it
// into an Ignition v2.0.0 configuration. Keys that do not correspond to
// fields of Config are reported as errors rather than silently ignored.
func ParseAsV2_0_0(data []byte) (types.Config, error) {
	var parsed Config
	if err := yaml.Unmarshal(data, &parsed); err != nil {
		return types.Config{}, err
	}

	// Unmarshal a second time into a generic map so the document's key
	// set can be checked against the Config schema.
	var rawKeys map[interface{}]interface{}
	if err := yaml.Unmarshal(data, &rawKeys); err != nil {
		return types.Config{}, err
	}
	if err := assertKeysValid(rawKeys, reflect.TypeOf(Config{})); err != nil {
		return types.Config{}, err
	}

	return ConvertAs2_0_0(parsed)
}

516
vendor/github.com/coreos/fuze/config/config_test.go generated vendored Normal file
View File

@@ -0,0 +1,516 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"reflect"
"testing"
"github.com/coreos/ignition/config/types"
)
// TestParseAsV2_0_0 is a table-driven test of ParseAsV2_0_0: each case
// feeds a YAML document through the parser and compares both the
// resulting Ignition v2 config and the returned error exactly
// (reflect.DeepEqual on both).
//
// NOTE(review): the indentation inside the YAML raw-string fixtures
// appears to have been flattened in this copy of the file — confirm the
// fixtures against upstream before editing them.
func TestParseAsV2_0_0(t *testing.T) {
type in struct {
data string
}
type out struct {
cfg types.Config
err error
}
tests := []struct {
in in
out out
}{
// Empty input still yields a valid v2 config skeleton.
{
in: in{data: ``},
out: out{cfg: types.Config{Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}}}},
},
// Errors
{
in: in{data: `foo:`},
out: out{err: ErrKeysUnrecognized{"foo"}},
},
{
in: in{data: `
networkd:
units:
- name: bad.blah
contents: not valid
`},
out: out{err: errors.New("invalid networkd unit extension")},
},
// Config
{
in: in{data: `
ignition:
config:
append:
- source: http://example.com/test1
verification:
hash:
function: sha512
sum: 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
- source: http://example.com/test2
replace:
source: http://example.com/test3
verification:
hash:
function: sha512
sum: 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
`},
out: out{cfg: types.Config{
Ignition: types.Ignition{
Version: types.IgnitionVersion{Major: 2},
Config: types.IgnitionConfig{
Append: []types.ConfigReference{
{
Source: types.Url{
Scheme: "http",
Host: "example.com",
Path: "/test1",
},
Verification: types.Verification{
Hash: &types.Hash{
Function: "sha512",
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
},
},
},
{
Source: types.Url{
Scheme: "http",
Host: "example.com",
Path: "/test2",
},
},
},
Replace: &types.ConfigReference{
Source: types.Url{
Scheme: "http",
Host: "example.com",
Path: "/test3",
},
Verification: types.Verification{
Hash: &types.Hash{
Function: "sha512",
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
},
},
},
},
},
}},
},
// Storage
{
in: in{data: `
storage:
disks:
- device: /dev/sda
wipe_table: true
partitions:
- label: ROOT
number: 7
size: 100MB
start: 50MB
type_guid: 11111111-1111-1111-1111-111111111111
- label: DATA
number: 12
size: 1GB
start: 300MB
type_guid: 00000000-0000-0000-0000-000000000000
- label: NOTHING
- device: /dev/sdb
wipe_table: true
raid:
- name: fast
level: raid0
devices:
- /dev/sdc
- /dev/sdd
- name: durable
level: raid1
devices:
- /dev/sde
- /dev/sdf
- /dev/sdg
spares: 1
filesystems:
- name: filesystem1
mount:
device: /dev/disk/by-partlabel/ROOT
format: btrfs
create:
force: true
options:
- -L
- ROOT
- name: filesystem2
mount:
device: /dev/disk/by-partlabel/DATA
format: ext4
- name: filesystem3
path: /sysroot
files:
- path: /opt/file1
filesystem: filesystem1
contents:
inline: file1
mode: 0644
user:
id: 500
group:
id: 501
- path: /opt/file2
filesystem: filesystem1
contents:
remote:
url: http://example.com/file2
compression: gzip
verification:
hash:
function: sha512
sum: 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
mode: 0644
user:
id: 502
group:
id: 503
- path: /opt/file3
filesystem: filesystem2
contents:
remote:
url: http://example.com/file3
compression: gzip
mode: 0400
user:
id: 1000
group:
id: 1001
- path: /opt/file4
filesystem: filesystem2
`},
out: out{cfg: types.Config{
Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}},
Storage: types.Storage{
Disks: []types.Disk{
{
Device: types.Path("/dev/sda"),
WipeTable: true,
Partitions: []types.Partition{
{
Label: types.PartitionLabel("ROOT"),
Number: 7,
Size: types.PartitionDimension(0x32000),
Start: types.PartitionDimension(0x19000),
TypeGUID: "11111111-1111-1111-1111-111111111111",
},
{
Label: types.PartitionLabel("DATA"),
Number: 12,
Size: types.PartitionDimension(0x200000),
Start: types.PartitionDimension(0x96000),
TypeGUID: "00000000-0000-0000-0000-000000000000",
},
{
Label: types.PartitionLabel("NOTHING"),
},
},
},
{
Device: types.Path("/dev/sdb"),
WipeTable: true,
},
},
Arrays: []types.Raid{
{
Name: "fast",
Level: "raid0",
Devices: []types.Path{types.Path("/dev/sdc"), types.Path("/dev/sdd")},
},
{
Name: "durable",
Level: "raid1",
Devices: []types.Path{types.Path("/dev/sde"), types.Path("/dev/sdf"), types.Path("/dev/sdg")},
Spares: 1,
},
},
Filesystems: []types.Filesystem{
{
Name: "filesystem1",
Mount: &types.FilesystemMount{
Device: types.Path("/dev/disk/by-partlabel/ROOT"),
Format: types.FilesystemFormat("btrfs"),
Create: &types.FilesystemCreate{
Force: true,
Options: types.MkfsOptions([]string{"-L", "ROOT"}),
},
},
},
{
Name: "filesystem2",
Mount: &types.FilesystemMount{
Device: types.Path("/dev/disk/by-partlabel/DATA"),
Format: types.FilesystemFormat("ext4"),
},
},
{
Name: "filesystem3",
Path: func(p types.Path) *types.Path { return &p }("/sysroot"),
},
},
Files: []types.File{
{
Filesystem: "filesystem1",
Path: types.Path("/opt/file1"),
Contents: types.FileContents{
Source: types.Url{
Scheme: "data",
Opaque: ",file1",
},
},
Mode: types.FileMode(0644),
User: types.FileUser{Id: 500},
Group: types.FileGroup{Id: 501},
},
{
Filesystem: "filesystem1",
Path: types.Path("/opt/file2"),
Contents: types.FileContents{
Source: types.Url{
Scheme: "http",
Host: "example.com",
Path: "/file2",
},
Compression: "gzip",
Verification: types.Verification{
Hash: &types.Hash{
Function: "sha512",
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
},
},
},
Mode: types.FileMode(0644),
User: types.FileUser{Id: 502},
Group: types.FileGroup{Id: 503},
},
{
Filesystem: "filesystem2",
Path: types.Path("/opt/file3"),
Contents: types.FileContents{
Source: types.Url{
Scheme: "http",
Host: "example.com",
Path: "/file3",
},
Compression: "gzip",
},
Mode: types.FileMode(0400),
User: types.FileUser{Id: 1000},
Group: types.FileGroup{Id: 1001},
},
{
Filesystem: "filesystem2",
Path: types.Path("/opt/file4"),
Contents: types.FileContents{
Source: types.Url{
Scheme: "data",
Opaque: ",",
},
},
},
},
},
}},
},
// systemd
{
in: in{data: `
systemd:
units:
- name: test1.service
enable: true
contents: test1 contents
dropins:
- name: conf1.conf
contents: conf1 contents
- name: conf2.conf
contents: conf2 contents
- name: test2.service
mask: true
contents: test2 contents
`},
out: out{cfg: types.Config{
Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}},
Systemd: types.Systemd{
Units: []types.SystemdUnit{
{
Name: "test1.service",
Enable: true,
Contents: "test1 contents",
DropIns: []types.SystemdUnitDropIn{
{
Name: "conf1.conf",
Contents: "conf1 contents",
},
{
Name: "conf2.conf",
Contents: "conf2 contents",
},
},
},
{
Name: "test2.service",
Mask: true,
Contents: "test2 contents",
},
},
},
}},
},
// networkd
{
in: in{data: `
networkd:
units:
- name: empty.netdev
- name: test.network
contents: test config
`},
out: out{cfg: types.Config{
Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}},
Networkd: types.Networkd{
Units: []types.NetworkdUnit{
{
Name: "empty.netdev",
},
{
Name: "test.network",
Contents: "test config",
},
},
},
}},
},
// passwd
{
in: in{data: `
passwd:
users:
- name: user 1
password_hash: password 1
ssh_authorized_keys:
- key1
- key2
- name: user 2
password_hash: password 2
ssh_authorized_keys:
- key3
- key4
create:
uid: 123
gecos: gecos
home_dir: /home/user 2
no_create_home: true
primary_group: wheel
groups:
- wheel
- plugdev
no_user_group: true
system: true
no_log_init: true
shell: /bin/zsh
- name: user 3
password_hash: password 3
ssh_authorized_keys:
- key5
- key6
create: {}
groups:
- name: group 1
gid: 1000
password_hash: password 1
system: true
- name: group 2
password_hash: password 2
`},
out: out{cfg: types.Config{
Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}},
Passwd: types.Passwd{
Users: []types.User{
{
Name: "user 1",
PasswordHash: "password 1",
SSHAuthorizedKeys: []string{"key1", "key2"},
},
{
Name: "user 2",
PasswordHash: "password 2",
SSHAuthorizedKeys: []string{"key3", "key4"},
Create: &types.UserCreate{
Uid: func(i uint) *uint { return &i }(123),
GECOS: "gecos",
Homedir: "/home/user 2",
NoCreateHome: true,
PrimaryGroup: "wheel",
Groups: []string{"wheel", "plugdev"},
NoUserGroup: true,
System: true,
NoLogInit: true,
Shell: "/bin/zsh",
},
},
{
Name: "user 3",
PasswordHash: "password 3",
SSHAuthorizedKeys: []string{"key5", "key6"},
Create: &types.UserCreate{},
},
},
Groups: []types.Group{
{
Name: "group 1",
Gid: func(i uint) *uint { return &i }(1000),
PasswordHash: "password 1",
System: true,
},
{
Name: "group 2",
PasswordHash: "password 2",
},
},
},
}},
},
}
// Run every case; both the error and the config must match exactly.
for i, test := range tests {
cfg, err := ParseAsV2_0_0([]byte(test.in.data))
if !reflect.DeepEqual(err, test.out.err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(cfg, test.out.cfg) {
t.Errorf("#%d: bad config: want %#v, got %#v", i, test.out.cfg, cfg)
}
}
}

276
vendor/github.com/coreos/fuze/config/convert.go generated vendored Normal file
View File

@@ -0,0 +1,276 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"net/url"
"github.com/alecthomas/units"
"github.com/coreos/ignition/config/types"
"github.com/vincent-petithory/dataurl"
)
const (
// BYTES_PER_SECTOR is the sector size assumed when converting
// byte-denominated partition dimensions into sector counts.
BYTES_PER_SECTOR = 512
)
// ConvertAs2_0_0 maps a parsed fuze Config onto the Ignition v2.0.0
// types.Config structure, section by section, and validates the result
// with AssertValid before returning it. On any error the zero
// types.Config is returned.
func ConvertAs2_0_0(in Config) (types.Config, error) {
out := types.Config{
Ignition: types.Ignition{
Version: types.IgnitionVersion{Major: 2, Minor: 0},
},
}
// ignition.config: appended references plus an optional replacement.
for _, ref := range in.Ignition.Config.Append {
newRef, err := convertConfigReference(ref)
if err != nil {
return types.Config{}, err
}
out.Ignition.Config.Append = append(out.Ignition.Config.Append, newRef)
}
if in.Ignition.Config.Replace != nil {
newRef, err := convertConfigReference(*in.Ignition.Config.Replace)
if err != nil {
return types.Config{}, err
}
out.Ignition.Config.Replace = &newRef
}
// storage.disks: partition sizes/starts are converted from byte
// strings to sector counts.
for _, disk := range in.Storage.Disks {
newDisk := types.Disk{
Device: types.Path(disk.Device),
WipeTable: disk.WipeTable,
}
for _, partition := range disk.Partitions {
size, err := convertPartitionDimension(partition.Size)
if err != nil {
return types.Config{}, err
}
start, err := convertPartitionDimension(partition.Start)
if err != nil {
return types.Config{}, err
}
newDisk.Partitions = append(newDisk.Partitions, types.Partition{
Label: types.PartitionLabel(partition.Label),
Number: partition.Number,
Size: size,
Start: start,
TypeGUID: types.PartitionTypeGUID(partition.TypeGUID),
})
}
out.Storage.Disks = append(out.Storage.Disks, newDisk)
}
// storage.raid arrays.
for _, array := range in.Storage.Arrays {
newArray := types.Raid{
Name: array.Name,
Level: array.Level,
Spares: array.Spares,
}
for _, device := range array.Devices {
newArray.Devices = append(newArray.Devices, types.Path(device))
}
out.Storage.Arrays = append(out.Storage.Arrays, newArray)
}
// storage.filesystems: an empty path becomes a nil *types.Path so the
// output distinguishes "unset" from "empty".
for _, filesystem := range in.Storage.Filesystems {
newFilesystem := types.Filesystem{
Name: filesystem.Name,
Path: func(p types.Path) *types.Path {
if p == "" {
return nil
}
return &p
}(types.Path(filesystem.Path)),
}
if filesystem.Mount != nil {
newFilesystem.Mount = &types.FilesystemMount{
Device: types.Path(filesystem.Mount.Device),
Format: types.FilesystemFormat(filesystem.Mount.Format),
}
if filesystem.Mount.Create != nil {
newFilesystem.Mount.Create = &types.FilesystemCreate{
Force: filesystem.Mount.Create.Force,
Options: types.MkfsOptions(filesystem.Mount.Create.Options),
}
}
}
out.Storage.Filesystems = append(out.Storage.Filesystems, newFilesystem)
}
// storage.files: inline contents become a data URL; remote contents,
// when present, overwrite the inline result (remote wins if both are
// set); a file with neither gets an empty data URL.
for _, file := range in.Storage.Files {
newFile := types.File{
Filesystem: file.Filesystem,
Path: types.Path(file.Path),
Mode: types.FileMode(file.Mode),
User: types.FileUser{Id: file.User.Id},
Group: types.FileGroup{Id: file.Group.Id},
}
if file.Contents.Inline != "" {
newFile.Contents = types.FileContents{
Source: types.Url{
Scheme: "data",
Opaque: "," + dataurl.EscapeString(file.Contents.Inline),
},
}
}
if file.Contents.Remote.Url != "" {
source, err := url.Parse(file.Contents.Remote.Url)
if err != nil {
return types.Config{}, err
}
newFile.Contents = types.FileContents{Source: types.Url(*source)}
}
if newFile.Contents == (types.FileContents{}) {
newFile.Contents = types.FileContents{
Source: types.Url{
Scheme: "data",
Opaque: ",",
},
}
}
// Compression and verification are only specified on remote contents.
newFile.Contents.Compression = types.Compression(file.Contents.Remote.Compression)
newFile.Contents.Verification = convertVerification(file.Contents.Remote.Verification)
out.Storage.Files = append(out.Storage.Files, newFile)
}
// systemd.units and their drop-ins.
for _, unit := range in.Systemd.Units {
newUnit := types.SystemdUnit{
Name: types.SystemdUnitName(unit.Name),
Enable: unit.Enable,
Mask: unit.Mask,
Contents: unit.Contents,
}
for _, dropIn := range unit.DropIns {
newUnit.DropIns = append(newUnit.DropIns, types.SystemdUnitDropIn{
Name: types.SystemdUnitDropInName(dropIn.Name),
Contents: dropIn.Contents,
})
}
out.Systemd.Units = append(out.Systemd.Units, newUnit)
}
// networkd.units.
for _, unit := range in.Networkd.Units {
out.Networkd.Units = append(out.Networkd.Units, types.NetworkdUnit{
Name: types.NetworkdUnitName(unit.Name),
Contents: unit.Contents,
})
}
// passwd.users: the optional create section is copied field by field.
for _, user := range in.Passwd.Users {
newUser := types.User{
Name: user.Name,
PasswordHash: user.PasswordHash,
SSHAuthorizedKeys: user.SSHAuthorizedKeys,
}
if user.Create != nil {
newUser.Create = &types.UserCreate{
Uid: user.Create.Uid,
GECOS: user.Create.GECOS,
Homedir: user.Create.Homedir,
NoCreateHome: user.Create.NoCreateHome,
PrimaryGroup: user.Create.PrimaryGroup,
Groups: user.Create.Groups,
NoUserGroup: user.Create.NoUserGroup,
System: user.Create.System,
NoLogInit: user.Create.NoLogInit,
Shell: user.Create.Shell,
}
}
out.Passwd.Users = append(out.Passwd.Users, newUser)
}
// passwd.groups.
for _, group := range in.Passwd.Groups {
out.Passwd.Groups = append(out.Passwd.Groups, types.Group{
Name: group.Name,
Gid: group.Gid,
PasswordHash: group.PasswordHash,
System: group.System,
})
}
// Reject any converted config that fails Ignition's own validation.
if err := out.AssertValid(); err != nil {
return types.Config{}, err
}
return out, nil
}
// convertConfigReference translates a Fuze ConfigReference into the Ignition
// equivalent, parsing the source string into a URL. It returns an error when
// the source is not a valid URL.
func convertConfigReference(in ConfigReference) (types.ConfigReference, error) {
	parsed, err := url.Parse(in.Source)
	if err != nil {
		return types.ConfigReference{}, err
	}
	ref := types.ConfigReference{
		Source:       types.Url(*parsed),
		Verification: convertVerification(in.Verification),
	}
	return ref, nil
}
// convertVerification translates a Fuze Verification into the Ignition
// equivalent. If the hash is incomplete (missing function or sum), the zero
// Verification is returned, meaning no verification will be performed.
func convertVerification(in Verification) types.Verification {
	if in.Hash.Function == "" || in.Hash.Sum == "" {
		return types.Verification{}
	}
	// Keyed literal: `go vet` flags unkeyed fields in composite literals of
	// imported struct types, and keys keep this robust to field reordering.
	return types.Verification{
		Hash: &types.Hash{
			Function: in.Hash.Function,
			Sum:      in.Hash.Sum,
		},
	}
}
// convertPartitionDimension parses a human-friendly base-2 byte-count string
// (e.g. "1GiB") and converts it to a sector count, rounding up to a whole
// number of sectors. An empty string maps to 0 (meaning "automatic").
func convertPartitionDimension(in string) (types.PartitionDimension, error) {
	if in == "" {
		return 0, nil
	}

	byteCount, err := units.ParseBase2Bytes(in)
	if err != nil {
		return 0, err
	}
	if byteCount < 0 {
		return 0, fmt.Errorf("invalid dimension (negative): %q", in)
	}

	// Translate bytes into sectors, rounding any partial sector up.
	// (Dividing first avoids overflow near the top of the range.)
	sectorCount := byteCount / BYTES_PER_SECTOR
	if byteCount%BYTES_PER_SECTOR != 0 {
		sectorCount++
	}
	return types.PartitionDimension(uint64(sectorCount)), nil
}

129
vendor/github.com/coreos/fuze/config/types.go generated vendored Normal file
View File

@@ -0,0 +1,129 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
// Config is the top-level Fuze configuration as written in YAML. Its shape
// mirrors an Ignition v2.0.0 config, but with snake_case YAML keys and
// human-friendly string values (e.g. partition sizes) that are converted to
// Ignition's types elsewhere in this package.
type Config struct {
	// Ignition holds metadata about the configuration itself.
	Ignition struct {
		Config struct {
			// Append lists configs to be appended to this config.
			Append []ConfigReference `yaml:"append"`
			// Replace, when non-nil, names a config that replaces this one.
			Replace *ConfigReference `yaml:"replace"`
		} `yaml:"config"`
	} `yaml:"ignition"`
	// Storage describes the desired state of disks, RAID arrays, filesystems,
	// and files.
	Storage struct {
		Disks []struct {
			Device     string `yaml:"device"`
			WipeTable  bool   `yaml:"wipe_table"`
			Partitions []struct {
				Label  string `yaml:"label"`
				Number int    `yaml:"number"`
				// Size and Start are strings so units like "1GiB" can be
				// parsed during conversion.
				Size     string `yaml:"size"`
				Start    string `yaml:"start"`
				TypeGUID string `yaml:"type_guid"`
			} `yaml:"partitions"`
		} `yaml:"disks"`
		// Arrays describes RAID arrays (note the YAML key is "raid").
		Arrays []struct {
			Name    string   `yaml:"name"`
			Level   string   `yaml:"level"`
			Devices []string `yaml:"devices"`
			Spares  int      `yaml:"spares"`
		} `yaml:"raid"`
		Filesystems []struct {
			Name string `yaml:"name"`
			// Mount, when non-nil, describes how to (create and) mount the
			// filesystem; Path instead refers to an already-mounted location.
			Mount *struct {
				Device string `yaml:"device"`
				Format string `yaml:"format"`
				Create *struct {
					Force   bool     `yaml:"force"`
					Options []string `yaml:"options"`
				} `yaml:"create"`
			} `yaml:"mount"`
			Path string `yaml:"path"`
		} `yaml:"filesystems"`
		Files []struct {
			Filesystem string `yaml:"filesystem"`
			Path       string `yaml:"path"`
			// Contents may be given inline or fetched from a remote URL.
			Contents struct {
				Remote struct {
					Url          string       `yaml:"url"`
					Compression  string       `yaml:"compression"`
					Verification Verification `yaml:"verification"`
				} `yaml:"remote"`
				Inline string `yaml:"inline"`
			} `yaml:"contents"`
			Mode int `yaml:"mode"`
			User struct {
				Id int `yaml:"id"`
			} `yaml:"user"`
			Group struct {
				Id int `yaml:"id"`
			} `yaml:"group"`
		} `yaml:"files"`
	} `yaml:"storage"`
	// Systemd describes systemd units and their drop-ins.
	Systemd struct {
		Units []struct {
			Name     string `yaml:"name"`
			Enable   bool   `yaml:"enable"`
			Mask     bool   `yaml:"mask"`
			Contents string `yaml:"contents"`
			DropIns  []struct {
				Name     string `yaml:"name"`
				Contents string `yaml:"contents"`
			} `yaml:"dropins"`
		} `yaml:"units"`
	} `yaml:"systemd"`
	// Networkd describes networkd unit files.
	Networkd struct {
		Units []struct {
			Name     string `yaml:"name"`
			Contents string `yaml:"contents"`
		} `yaml:"units"`
	} `yaml:"networkd"`
	// Passwd describes additions to the passwd/group databases.
	Passwd struct {
		Users []struct {
			Name              string   `yaml:"name"`
			PasswordHash      string   `yaml:"password_hash"`
			SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
			// Create, when non-nil, requests that the account be created
			// with the given options; pointers distinguish "unset" from zero.
			Create *struct {
				Uid          *uint    `yaml:"uid"`
				GECOS        string   `yaml:"gecos"`
				Homedir      string   `yaml:"home_dir"`
				NoCreateHome bool     `yaml:"no_create_home"`
				PrimaryGroup string   `yaml:"primary_group"`
				Groups       []string `yaml:"groups"`
				NoUserGroup  bool     `yaml:"no_user_group"`
				System       bool     `yaml:"system"`
				NoLogInit    bool     `yaml:"no_log_init"`
				Shell        string   `yaml:"shell"`
			} `yaml:"create"`
		} `yaml:"users"`
		Groups []struct {
			Name         string `yaml:"name"`
			Gid          *uint  `yaml:"gid"`
			PasswordHash string `yaml:"password_hash"`
			System       bool   `yaml:"system"`
		} `yaml:"groups"`
	} `yaml:"passwd"`
}
// ConfigReference points at an external config by URL, optionally paired
// with verification data for its contents.
type ConfigReference struct {
	Source       string       `yaml:"source"`
	Verification Verification `yaml:"verification"`
}

// Verification describes how fetched contents are checked: a hash function
// name together with the expected sum.
type Verification struct {
	Hash struct {
		Function string `yaml:"function"`
		Sum      string `yaml:"sum"`
	} `yaml:"hash"`
}

63
vendor/github.com/coreos/fuze/config/validate.go generated vendored Normal file
View File

@@ -0,0 +1,63 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"reflect"
)
type ErrKeysUnrecognized []string
func (e ErrKeysUnrecognized) Error() string {
return fmt.Sprintf("unrecognized keys: %v", []string(e))
}
func assertKeysValid(value interface{}, refType reflect.Type) ErrKeysUnrecognized {
var err ErrKeysUnrecognized
if refType.Kind() == reflect.Ptr {
refType = refType.Elem()
}
switch value.(type) {
case map[interface{}]interface{}:
ks := value.(map[interface{}]interface{})
keys:
for key := range ks {
for i := 0; i < refType.NumField(); i++ {
sf := refType.Field(i)
tv := sf.Tag.Get("yaml")
if tv == key {
if serr := assertKeysValid(ks[key], sf.Type); serr != nil {
err = append(err, serr...)
}
continue keys
}
}
err = append(err, fmt.Sprintf("%v", key))
}
case []interface{}:
ks := value.([]interface{})
for i := range ks {
if serr := assertKeysValid(ks[i], refType.Elem()); serr != nil {
err = append(err, serr...)
}
}
default:
}
return err
}

64
vendor/github.com/coreos/fuze/config/validate_test.go generated vendored Normal file
View File

@@ -0,0 +1,64 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
// TestAssertKeysValid exercises assertKeysValid with YAML documents that are
// well-formed but may contain keys unknown to the Config schema, including a
// misspelled top-level key and a misspelled nested list-element key.
func TestAssertKeysValid(t *testing.T) {
	type in struct {
		data string // YAML document under test
	}
	type out struct {
		err ErrKeysUnrecognized // expected error; nil when all keys are valid
	}

	tests := []struct {
		in  in
		out out
	}{
		{
			in:  in{data: "ignition:\n config:"},
			out: out{},
		},
		{
			in:  in{data: "passwd:\n groups:\n - name: example"},
			out: out{},
		},
		{
			// "password" is not a recognized top-level key ("passwd" is).
			in:  in{data: "password:\n groups:"},
			out: out{err: ErrKeysUnrecognized{"password"}},
		},
		{
			// "naem" is a misspelling of "name" inside a list element.
			in:  in{data: "passwd:\n groups:\n - naem: example"},
			out: out{err: ErrKeysUnrecognized{"naem"}},
		},
	}

	for i, test := range tests {
		var cfg interface{}
		// Unmarshal into a generic value so unknown keys survive parsing.
		if err := yaml.Unmarshal([]byte(test.in.data), &cfg); err != nil {
			t.Errorf("#%d: unmarshal failed: %v", i, err)
			continue
		}
		if err := assertKeysValid(cfg, reflect.TypeOf(Config{})); !reflect.DeepEqual(err, test.out.err) {
			t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
		}
	}
}

5
vendor/github.com/coreos/fuze/config/vendor.manifest generated vendored Normal file
View File

@@ -0,0 +1,5 @@
# If you manipulate the contents of vendor/, amend this accordingly.
# pkg version
github.com/alecthomas/units 6b4e7dc5e3143b85ea77909c72caf89416fc2915
github.com/coreos/ignition/config b6850837b3b9bd17b673e58b5c406b5e4192ddca
github.com/vincent-petithory/dataurl 9a301d65acbb728fcc3ace14f45f511a4cfeea9c

94
vendor/github.com/coreos/fuze/doc/configuration.md generated vendored Normal file
View File

@@ -0,0 +1,94 @@
# Configuration Specification #
The Fuze configuration is a YAML document conforming to the following specification, with **_italicized_** entries being optional:
* **_ignition_** (object): metadata about the configuration itself.
* **_config_** (objects): options related to the configuration.
* **_append_** (list of objects): a list of the configs to be appended to the current config.
* **source** (string): the URL of the config. Supported schemes are http and https. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **_verification_** (object): options related to the verification of the config.
* **_hash_** (object): the hash of the config
* **_function_** (string): the function used to hash the config. Supported functions are sha512.
* **_sum_** (string): the resulting sum of the hash applied to the contents.
* **_replace_** (object): the config that will replace the current.
* **source** (string): the URL of the config. Supported schemes are http and https. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **_verification_** (object): options related to the verification of the config.
* **_hash_** (object): the hash of the config
* **_function_** (string): the function used to hash the config. Supported functions are sha512.
* **_sum_** (string): the resulting sum of the hash applied to the contents.
* **_storage_** (object): describes the desired state of the system's storage devices.
* **_disks_** (list of objects): the list of disks to be configured and their options.
* **device** (string): the absolute path to the device. Devices are typically referenced by the `/dev/disk/by-*` symlinks.
* **_wipe_table_** (boolean): whether or not the partition tables shall be wiped. When true, the partition tables are erased before any further manipulation. Otherwise, the existing entries are left intact.
* **_partitions_** (list of objects): the list of partitions and their configuration for this particular disk.
* **_label_** (string): the PARTLABEL for the partition.
  * **_number_** (integer): the partition number, which dictates its position in the partition table (one-indexed). If zero, use the next available partition slot.
* **_size_** (integer): the size of the partition (in sectors). If zero, the partition will fill the remainder of the disk.
* **_start_** (integer): the start of the partition (in sectors). If zero, the partition will be positioned at the earliest available part of the disk.
* **_type_guid_** (string): the GPT [partition type GUID][part-types]. If omitted, the default will be 0FC63DAF-8483-4772-8E79-3D69D8477DE4 (Linux filesystem data).
* **_raid_** (list of objects): the list of RAID arrays to be configured.
* **name** (string): the name to use for the resulting md device.
* **level** (string): the redundancy level of the array (e.g. linear, raid1, raid5, etc.).
* **devices** (list of strings): the list of devices (referenced by their absolute path) in the array.
* **_spares_** (integer): the number of spares (if applicable) in the array.
* **_filesystems_** (list of objects): the list of filesystems to be configured and/or used in the "files" section. Either "mount" or "path" needs to be specified.
* **_name_** (string): the identifier for the filesystem, internal to Ignition. This is only required if the filesystem needs to be referenced in the "files" section.
* **_mount_** (object): contains the set of mount and formatting options for the filesystem. A non-null entry indicates that the filesystem should be mounted before it is used by Ignition.
* **device** (string): the absolute path to the device. Devices are typically referenced by the `/dev/disk/by-*` symlinks.
* **format** (string): the filesystem format (ext4, btrfs, or xfs).
* **_create_** (object): contains the set of options to be used when creating the filesystem. A non-null entry indicates that the filesystem shall be created.
* **_force_** (boolean): whether or not the create operation shall overwrite an existing filesystem.
* **_options_** (list of strings): any additional options to be passed to the format-specific mkfs utility.
* **_path_** (string): the mount-point of the filesystem. A non-null entry indicates that the filesystem has already been mounted by the system at the specified path. This is really only useful for "/sysroot".
* **_files_** (list of objects): the list of files, rooted in this particular filesystem, to be written.
* **filesystem** (string): the internal identifier of the filesystem. This matches the last filesystem with the given identifier.
* **path** (string): the absolute path to the file.
* **_contents_** (object): options related to the contents of the file.
* **_inline_** (string): the contents of the file.
* **_remote_** (object): options related to the fetching of remote file contents.
* **_compression_** (string): the type of compression used on the contents (null or gzip)
* **_source_** (string): the URL of the file contents. Supported schemes are http and [data][rfc2397]. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **_verification_** (object): options related to the verification of the file contents.
* **_hash_** (object): the hash of the config
* **_function_** (string): the function used to hash the config. Supported functions are sha512.
* **_sum_** (string): the resulting sum of the hash applied to the contents.
* **_mode_** (integer): the file's permission mode.
* **_user_** (object): specifies the file's owner.
* **_id_** (integer): the user ID of the owner.
* **_group_** (object): specifies the group of the owner.
* **_id_** (integer): the group ID of the owner.
* **_systemd_** (object): describes the desired state of the systemd units.
* **_units_** (list of objects): the list of systemd units.
* **name** (string): the name of the unit. This must be suffixed with a valid unit type (e.g. "thing.service").
* **_enable_** (boolean): whether or not the service shall be enabled. When true, the service is enabled. In order for this to have any effect, the unit must have an install section.
* **_mask_** (boolean): whether or not the service shall be masked. When true, the service is masked by symlinking it to `/dev/null`.
* **_contents_** (string): the contents of the unit.
* **_dropins_** (list of objects): the list of drop-ins for the unit.
* **name** (string): the name of the drop-in. This must be suffixed with ".conf".
* **_contents_** (string): the contents of the drop-in.
* **_networkd_** (object): describes the desired state of the networkd files.
* **_units_** (list of objects): the list of networkd files.
* **name** (string): the name of the file. This must be suffixed with a valid unit type (e.g. "00-eth0.network").
* **_contents_** (string): the contents of the networkd file.
* **_passwd_** (object): describes the desired additions to the passwd database.
* **_users_** (list of objects): the list of accounts to be added.
* **name** (string): the username for the account.
* **_password_hash_** (string): the encrypted password for the account.
* **_ssh_authorized_keys_** (list of strings): a list of SSH keys to be added to the user's authorized_keys.
* **_create_** (object): contains the set of options to be used when creating the user. A non-null entry indicates that the user account shall be created.
* **_uid_** (integer): the user ID of the new account.
* **_gecos_** (string): the GECOS field of the new account.
* **_home_dir_** (string): the home directory of the new account.
* **_no_create_home_** (boolean): whether or not to create the user's home directory.
* **_primary_group_** (string): the name or ID of the primary group of the new account.
* **_groups_** (list of strings): the list of supplementary groups of the new account.
* **_no_user_group_** (boolean): whether or not to create a group with the same name as the user.
* **_no_log_init_** (boolean): whether or not to add the user to the lastlog and faillog databases.
* **_shell_** (string): the login shell of the new account.
* **_groups_** (list of objects): the list of groups to be added.
* **name** (string): the name of the group.
* **_gid_** (integer): the group ID of the new group.
* **_password_hash_** (string): the encrypted password of the new group.
[part-types]: http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs
[rfc2397]: https://tools.ietf.org/html/rfc2397

81
vendor/github.com/coreos/fuze/internal/main.go generated vendored Normal file
View File

@@ -0,0 +1,81 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/coreos/fuze/config"
)
// stderr writes a printf-style message to standard error, guaranteeing
// exactly one trailing newline.
func stderr(f string, a ...interface{}) {
	msg := strings.TrimSuffix(fmt.Sprintf(f, a...), "\n")
	fmt.Fprintln(os.Stderr, msg)
}
// main reads a Fuze YAML config, converts it to an Ignition v2.0.0 JSON
// config, and writes the result. On any failure it reports to stderr and
// exits non-zero.
func main() {
	var opts struct {
		help    bool
		pretty  bool
		inFile  string
		outFile string
	}
	flag.BoolVar(&opts.help, "help", false, "print help and exit")
	flag.BoolVar(&opts.pretty, "pretty", false, "indent the output file")
	flag.StringVar(&opts.inFile, "in-file", "/dev/stdin", "input file (YAML)")
	flag.StringVar(&opts.outFile, "out-file", "/dev/stdout", "output file (JSON)")
	flag.Parse()

	if opts.help {
		flag.Usage()
		return
	}

	dataIn, err := ioutil.ReadFile(opts.inFile)
	if err != nil {
		stderr("Failed to read: %v", err)
		os.Exit(1)
	}

	cfg, err := config.ParseAsV2_0_0(dataIn)
	if err != nil {
		stderr("Failed to parse: %v", err)
		os.Exit(1)
	}

	var dataOut []byte
	if opts.pretty {
		dataOut, err = json.MarshalIndent(&cfg, "", "  ")
		// MarshalIndent does not emit a trailing newline; add one so the
		// pretty output ends cleanly.
		dataOut = append(dataOut, '\n')
	} else {
		dataOut, err = json.Marshal(&cfg)
	}
	if err != nil {
		stderr("Failed to marshal output: %v", err)
		os.Exit(1)
	}

	if err := ioutil.WriteFile(opts.outFile, dataOut, 0640); err != nil {
		stderr("Failed to write: %v", err)
		os.Exit(1)
	}
}

32
vendor/github.com/coreos/fuze/test generated vendored Executable file
View File

@@ -0,0 +1,32 @@
#!/bin/bash -eu
# Test runner: builds the project, then runs gofix/gofmt/govet checks and the
# unit tests (with coverage and the race detector). Extra arguments are
# forwarded to `go test`.

# Build first; ./build also sets REPO_PATH and the local gopath layout that
# the commands below rely on.
source ./build

# All Go source files, excluding vendored dependencies.
SRC=$(find . -name '*.go' \
	-not -path "./config/vendor/*")

# Packages under test, excluding vendored dependencies.
PKG=$(cd gopath/src/${REPO_PATH}; go list ./... | \
	grep --invert-match vendor)

# internal/log is additionally excluded from vetting; see:
# https://github.com/golang/go/issues/15067
PKG_VET=$(cd gopath/src/${REPO_PATH}; go list ./... | \
	grep --invert-match vendor | \
	grep --invert-match internal/log)

echo "Checking gofix..."
go tool fix -diff $SRC

echo "Checking gofmt..."
# gofmt -d prints a diff; a non-empty diff means unformatted code, so fail.
res=$(gofmt -d -e -s $SRC)
echo "${res}"
if [ -n "${res}" ]; then
	exit 1
fi

echo "Checking govet..."
go vet $PKG_VET

echo "Running tests..."
go test -timeout 60s -cover $@ ${PKG} --race

echo "Success"

View File

@@ -1,12 +1,35 @@
sudo: required
dist: trusty
language: go
matrix:
include:
- go: 1.5
env: GO15VENDOREXPERIMENT=1
- go: 1.6
go_import_path: github.com/coreos/ignition
go:
- 1.5
- 1.6
env:
global:
- GO15VENDOREXPERIMENT=1
matrix:
- TARGET=amd64
- TARGET=arm64
addons:
apt:
packages:
# install cross compilers for cgo support.
- gcc-aarch64-linux-gnu
- libc6-dev-arm64-cross
install:
-
script:
- ./test
- if [ "${TARGET}" == "amd64" ]; then
GOARCH="${TARGET}" ./test;
elif [ "${TARGET}" == "arm64" ]; then
eval "$(GIMME_ARCH=${TARGET} GIMME_CGO_ENABLED=1 ./gimme.local ${TRAVIS_GO_VERSION})";
GOARCH="${TARGET}" ./build;
file "bin/${TARGET}/ignition" | egrep 'aarch64';
fi

View File

@@ -1,3 +1,42 @@
15-Jun-2016 IGNITION v0.7.0
Features
- Allow HTTPS URLs
Bug Fixes
- Don't overwrite existing data when formatting ext4 unless force is set
- Ensure service unit in /etc doesn't exist before masking
- Capture and log stdout of subprocesses
Changes
- Drop YAML tags from the config package
18-May-2016 IGNITION v0.6.0
Features
- All URL schemes (currently http, oem, and data) are now supported
everywhere a URL can be provided
- Add base OEM and default user configurations for GCE
04-May-2016 IGNITION v0.5.0
Features
- Add support for GCE
Bug Fixes
- Write files after users and home directories are created
Changes
- Strip support for EC2 SSH keys (these are handled by coreos-metadata now)
- Add OEM-specific base configs and execute even if user config is empty
05-Apr-2016 IGNITION v0.4.0
Features

View File

@@ -10,6 +10,6 @@ Odds are good that you don't want to invoke Ignition directly. In fact, it isn't
**Ignition is under very active development!**
Please check out the [roadmap](ROADMAP.md) for information about the timeline. Use the [bug tracker][issues] to report bugs, but please avoid the urge to report lack of features for now.
Use the [bug tracker][issues] to report bugs, but please avoid the urge to report lack of features for now.
[issues]: https://github.com/coreos/bugs/issues/new?labels=component/ignition

View File

@@ -1,18 +0,0 @@
# Ignition roadmap #
**work in progress**
This document defines a high level roadmap for Ignition development. The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project.
## Ignition 0.4 (Apr) ##
- support for version 2.0.0 of the specification
- remote file contents
- chain-loaded configs
- RAM-based filesystems
## Ignition 0.5 (May) ##
- support for more config providers
- GCE
- OpenStack

View File

@@ -11,10 +11,11 @@ if [ ! -h gopath/src/${REPO_PATH} ]; then
ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
fi
export GOBIN=${PWD}/bin
export GOPATH=${PWD}/gopath
eval $(go env)
export GOBIN=${PWD}/bin/${GOARCH}
export GOPATH=${PWD}/gopath
export CGO_ENABLED=1
echo "Building ${NAME}..."
go build -ldflags "${GLDFLAGS}" -o ${GOBIN}/${NAME} ${REPO_PATH}/internal

View File

@@ -25,10 +25,6 @@ var (
type Compression string
func (c *Compression) UnmarshalYAML(unmarshal func(interface{}) error) error {
return c.unmarshal(unmarshal)
}
func (c *Compression) UnmarshalJSON(data []byte) error {
return c.unmarshal(func(tc interface{}) error {
return json.Unmarshal(data, tc)
@@ -41,12 +37,12 @@ func (c *Compression) unmarshal(unmarshal func(interface{}) error) error {
return err
}
*c = Compression(tc)
return c.assertValid()
return c.AssertValid()
}
func (c Compression) assertValid() error {
func (c Compression) AssertValid() error {
switch c {
case "gzip":
case "", "gzip":
default:
return ErrCompressionInvalid
}

View File

@@ -15,6 +15,8 @@
package types
import (
"reflect"
"github.com/coreos/go-semver/semver"
)
@@ -26,9 +28,51 @@ var (
)
type Config struct {
Ignition Ignition `json:"ignition" yaml:"ignition"`
Storage Storage `json:"storage,omitempty" yaml:"storage"`
Systemd Systemd `json:"systemd,omitempty" yaml:"systemd"`
Networkd Networkd `json:"networkd,omitempty" yaml:"networkd"`
Passwd Passwd `json:"passwd,omitempty" yaml:"passwd"`
Ignition Ignition `json:"ignition"`
Storage Storage `json:"storage,omitempty"`
Systemd Systemd `json:"systemd,omitempty"`
Networkd Networkd `json:"networkd,omitempty"`
Passwd Passwd `json:"passwd,omitempty"`
}
func (c Config) AssertValid() error {
return assertStructValid(reflect.ValueOf(c))
}
func assertValid(vObj reflect.Value) error {
if !vObj.IsValid() {
return nil
}
if obj, ok := vObj.Interface().(interface {
AssertValid() error
}); ok && !(vObj.Kind() == reflect.Ptr && vObj.IsNil()) {
if err := obj.AssertValid(); err != nil {
return err
}
}
switch vObj.Kind() {
case reflect.Ptr:
return assertValid(vObj.Elem())
case reflect.Struct:
return assertStructValid(vObj)
case reflect.Slice:
for i := 0; i < vObj.Len(); i++ {
if err := assertValid(vObj.Index(i)); err != nil {
return err
}
}
}
return nil
}
func assertStructValid(vObj reflect.Value) error {
for i := 0; i < vObj.Type().NumField(); i++ {
if err := assertValid(vObj.Field(i)); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,104 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"errors"
"reflect"
"testing"
)
// TestAssertValid checks Config.AssertValid across representative configs:
// version gating, a verification error surfaced from a nested reference,
// both filesystem variants (mount vs. pre-mounted path), and systemd unit
// name validation.
func TestAssertValid(t *testing.T) {
	type in struct {
		cfg Config
	}
	type out struct {
		err error // expected error; nil when the config is valid
	}

	tests := []struct {
		in  in
		out out
	}{
		// A config declaring major version 2 is valid.
		{
			in:  in{cfg: Config{Ignition: Ignition{Version: IgnitionVersion{Major: 2}}}},
			out: out{},
		},
		// The zero config carries version 0 and is rejected as too old.
		{
			in:  in{cfg: Config{}},
			out: out{err: ErrOldVersion},
		},
		// An unknown hash function inside a nested ConfigReference surfaces.
		{
			in: in{cfg: Config{
				Ignition: Ignition{
					Version: IgnitionVersion{Major: 2},
					Config: IgnitionConfig{
						Replace: &ConfigReference{
							Verification: Verification{
								Hash: &Hash{Function: "foobar"},
							},
						},
					},
				},
			}},
			out: out{errors.New("unrecognized hash function")},
		},
		// A filesystem described by a mount is valid.
		{
			in: in{cfg: Config{
				Ignition: Ignition{Version: IgnitionVersion{Major: 2}},
				Storage: Storage{
					Filesystems: []Filesystem{
						{
							Name: "filesystem1",
							Mount: &FilesystemMount{
								Device: Path("/dev/disk/by-partlabel/ROOT"),
								Format: FilesystemFormat("btrfs"),
							},
						},
					},
				},
			}},
			out: out{},
		},
		// A filesystem described only by a pre-mounted path is valid.
		{
			in: in{cfg: Config{
				Ignition: Ignition{Version: IgnitionVersion{Major: 2}},
				Storage: Storage{
					Filesystems: []Filesystem{
						{
							Name: "filesystem1",
							Path: func(p Path) *Path { return &p }("/sysroot"),
						},
					},
				},
			}},
			out: out{},
		},
		// Systemd unit names must end in a valid unit-type extension.
		{
			in: in{cfg: Config{
				Ignition: Ignition{Version: IgnitionVersion{Major: 2}},
				Systemd:  Systemd{Units: []SystemdUnit{{Name: "foo.bar"}}},
			}},
			out: out{err: errors.New("invalid systemd unit extension")},
		},
	}

	for i, test := range tests {
		err := test.in.cfg.AssertValid()
		if !reflect.DeepEqual(test.out.err, err) {
			t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
		}
	}
}

View File

@@ -20,44 +20,22 @@ import (
)
type Disk struct {
Device Path `json:"device,omitempty" yaml:"device"`
WipeTable bool `json:"wipeTable,omitempty" yaml:"wipe_table"`
Partitions []Partition `json:"partitions,omitempty" yaml:"partitions"`
Device Path `json:"device,omitempty"`
WipeTable bool `json:"wipeTable,omitempty"`
Partitions []Partition `json:"partitions,omitempty"`
}
func (n *Disk) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := n.unmarshal(unmarshal); err != nil {
return err
}
if err := n.preparePartitions(); err != nil {
return err
}
return n.assertValid()
}
func (n *Disk) UnmarshalJSON(data []byte) error {
err := n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
if err != nil {
return err
}
return n.assertValid()
}
type disk Disk
func (n *Disk) unmarshal(unmarshal func(interface{}) error) error {
func (n *Disk) UnmarshalJSON(data []byte) error {
tn := disk(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = Disk(tn)
return nil
return n.AssertValid()
}
func (n Disk) assertValid() error {
// This applies to YAML (post-prepare) and JSON unmarshals equally:
func (n Disk) AssertValid() error {
if len(n.Device) == 0 {
return fmt.Errorf("disk device is required")
}
@@ -140,28 +118,3 @@ func (n Disk) partitionsMisaligned() bool {
}
return false
}
// preparePartitions performs some checks and potentially adjusts the partitions for alignment.
// This is only invoked when unmarshalling YAML, since there we parse human-friendly units.
func (n *Disk) preparePartitions() error {
// On the YAML side we accept human-friendly units which may require massaging.
// align partition starts
for i := range n.Partitions {
// skip automatically placed partitions
if n.Partitions[i].Start == 0 {
continue
}
// keep partitions out of the first 2048 sectors
if n.Partitions[i].Start < 2048 {
n.Partitions[i].Start = 2048
}
// toss the bottom 11 bits
n.Partitions[i].Start &= ^PartitionDimension(2048 - 1)
}
// TODO(vc): may be interesting to do something about potentially overlapping partitions
return nil
}

View File

@@ -25,52 +25,41 @@ var (
)
type File struct {
Filesystem string `json:"filesystem,omitempty" yaml:"filesystem"`
Path Path `json:"path,omitempty" yaml:"path"`
Contents FileContents `json:"contents,omitempty" yaml:"contents"`
Mode FileMode `json:"mode,omitempty" yaml:"mode"`
User FileUser `json:"user,omitempty" yaml:"uid"`
Group FileGroup `json:"group,omitempty" yaml:"gid"`
Filesystem string `json:"filesystem,omitempty"`
Path Path `json:"path,omitempty"`
Contents FileContents `json:"contents,omitempty"`
Mode FileMode `json:"mode,omitempty"`
User FileUser `json:"user,omitempty"`
Group FileGroup `json:"group,omitempty"`
}
type FileUser struct {
Id int `json:"id,omitempty" yaml:"id"`
Id int `json:"id,omitempty"`
}
type FileGroup struct {
Id int `json:"id,omitempty" yaml:"id"`
Id int `json:"id,omitempty"`
}
type FileContents struct {
Compression Compression `json:"compression,omitempty" yaml:"compression"`
Source Url `json:"source,omitempty" yaml:"source"`
Verification Verification `json:"verification,omitempty" yaml:"verification"`
Compression Compression `json:"compression,omitempty"`
Source Url `json:"source,omitempty"`
Verification Verification `json:"verification,omitempty"`
}
type FileMode os.FileMode
func (m *FileMode) UnmarshalYAML(unmarshal func(interface{}) error) error {
return m.unmarshal(unmarshal)
}
func (m *FileMode) UnmarshalJSON(data []byte) error {
return m.unmarshal(func(tm interface{}) error {
return json.Unmarshal(data, tm)
})
}
type fileMode FileMode
func (m *FileMode) unmarshal(unmarshal func(interface{}) error) error {
func (m *FileMode) UnmarshalJSON(data []byte) error {
tm := fileMode(*m)
if err := unmarshal(&tm); err != nil {
if err := json.Unmarshal(data, &tm); err != nil {
return err
}
*m = FileMode(tm)
return m.assertValid()
return m.AssertValid()
}
func (m FileMode) assertValid() error {
func (m FileMode) AssertValid() error {
if (m &^ 07777) != 0 {
return ErrFileIllegalMode
}

View File

@@ -18,8 +18,6 @@ import (
"encoding/json"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestFileModeUnmarshalJSON(t *testing.T) {
@@ -57,45 +55,6 @@ func TestFileModeUnmarshalJSON(t *testing.T) {
}
}
func TestFileModeUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
mode FileMode
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `0644`},
out: out{mode: FileMode(0644)},
},
{
in: in{data: `0420`},
out: out{mode: FileMode(0420)},
},
{
in: in{data: `017777`},
out: out{mode: FileMode(017777), err: ErrFileIllegalMode},
},
}
for i, test := range tests {
var mode FileMode
err := yaml.Unmarshal([]byte(test.in.data), &mode)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.mode, mode) {
t.Errorf("#%d: bad mode: want %#o, got %#o", i, test.out.mode, mode)
}
}
}
func TestFileAssertValid(t *testing.T) {
type in struct {
mode FileMode
@@ -131,7 +90,7 @@ func TestFileAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.mode.assertValid()
err := test.in.mode.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}

View File

@@ -26,57 +26,46 @@ var (
)
type Filesystem struct {
Name string `json:"name,omitempty" yaml:"name"`
Mount *FilesystemMount `json:"mount,omitempty" yaml:"mount"`
Path Path `json:"path,omitempty" yaml:"path"`
Name string `json:"name,omitempty"`
Mount *FilesystemMount `json:"mount,omitempty"`
Path *Path `json:"path,omitempty"`
}
type filesystem Filesystem
type FilesystemMount struct {
Device Path `json:"device,omitempty" yaml:"device"`
Format FilesystemFormat `json:"format,omitempty" yaml:"format"`
Create *FilesystemCreate `json:"create,omitempty" yaml:"create"`
Device Path `json:"device,omitempty"`
Format FilesystemFormat `json:"format,omitempty"`
Create *FilesystemCreate `json:"create,omitempty"`
}
type FilesystemCreate struct {
Force bool `json:"force,omitempty" yaml:"force"`
Options MkfsOptions `json:"options,omitempty" yaml:"options"`
}
func (f *Filesystem) UnmarshalYAML(unmarshal func(interface{}) error) error {
return f.unmarshal(unmarshal)
Force bool `json:"force,omitempty"`
Options MkfsOptions `json:"options,omitempty"`
}
func (f *Filesystem) UnmarshalJSON(data []byte) error {
return f.unmarshal(func(tf interface{}) error {
return json.Unmarshal(data, tf)
})
}
type filesystem Filesystem
func (f *Filesystem) unmarshal(unmarshal func(interface{}) error) error {
tf := filesystem(*f)
if err := unmarshal(&tf); err != nil {
if err := json.Unmarshal(data, &tf); err != nil {
return err
}
*f = Filesystem(tf)
return f.assertValid()
return f.AssertValid()
}
func (f Filesystem) assertValid() error {
func (f Filesystem) AssertValid() error {
hasMount := false
hasPath := false
if f.Mount != nil {
hasMount = true
if err := f.Mount.assertValid(); err != nil {
if err := f.Mount.AssertValid(); err != nil {
return err
}
}
if len(f.Path) != 0 {
if f.Path != nil {
hasPath = true
if err := f.Path.assertValid(); err != nil {
if err := f.Path.AssertValid(); err != nil {
return err
}
}
@@ -90,61 +79,40 @@ func (f Filesystem) assertValid() error {
return nil
}
func (f *FilesystemMount) UnmarshalYAML(unmarshal func(interface{}) error) error {
return f.unmarshal(unmarshal)
}
func (f *FilesystemMount) UnmarshalJSON(data []byte) error {
return f.unmarshal(func(tf interface{}) error {
return json.Unmarshal(data, tf)
})
}
type filesystemMount FilesystemMount
func (f *FilesystemMount) unmarshal(unmarshal func(interface{}) error) error {
func (f *FilesystemMount) UnmarshalJSON(data []byte) error {
tf := filesystemMount(*f)
if err := unmarshal(&tf); err != nil {
if err := json.Unmarshal(data, &tf); err != nil {
return err
}
*f = FilesystemMount(tf)
return f.assertValid()
return f.AssertValid()
}
func (f FilesystemMount) assertValid() error {
if err := f.Device.assertValid(); err != nil {
func (f FilesystemMount) AssertValid() error {
if err := f.Device.AssertValid(); err != nil {
return err
}
if err := f.Format.assertValid(); err != nil {
if err := f.Format.AssertValid(); err != nil {
return err
}
return nil
}
type FilesystemFormat string
func (f *FilesystemFormat) UnmarshalYAML(unmarshal func(interface{}) error) error {
return f.unmarshal(unmarshal)
}
func (f *FilesystemFormat) UnmarshalJSON(data []byte) error {
return f.unmarshal(func(tf interface{}) error {
return json.Unmarshal(data, tf)
})
}
type filesystemFormat FilesystemFormat
func (f *FilesystemFormat) unmarshal(unmarshal func(interface{}) error) error {
func (f *FilesystemFormat) UnmarshalJSON(data []byte) error {
tf := filesystemFormat(*f)
if err := unmarshal(&tf); err != nil {
if err := json.Unmarshal(data, &tf); err != nil {
return err
}
*f = FilesystemFormat(tf)
return f.assertValid()
return f.AssertValid()
}
func (f FilesystemFormat) assertValid() error {
func (f FilesystemFormat) AssertValid() error {
switch f {
case "ext4", "btrfs", "xfs":
return nil
@@ -154,28 +122,17 @@ func (f FilesystemFormat) assertValid() error {
}
type MkfsOptions []string
func (o *MkfsOptions) UnmarshalYAML(unmarshal func(interface{}) error) error {
return o.unmarshal(unmarshal)
}
func (o *MkfsOptions) UnmarshalJSON(data []byte) error {
return o.unmarshal(func(to interface{}) error {
return json.Unmarshal(data, to)
})
}
type mkfsOptions MkfsOptions
func (o *MkfsOptions) unmarshal(unmarshal func(interface{}) error) error {
func (o *MkfsOptions) UnmarshalJSON(data []byte) error {
to := mkfsOptions(*o)
if err := unmarshal(&to); err != nil {
if err := json.Unmarshal(data, &to); err != nil {
return err
}
*o = MkfsOptions(to)
return o.assertValid()
return o.AssertValid()
}
func (o MkfsOptions) assertValid() error {
func (o MkfsOptions) AssertValid() error {
return nil
}

View File

@@ -18,8 +18,6 @@ import (
"encoding/json"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestFilesystemFormatUnmarshalJSON(t *testing.T) {
@@ -57,41 +55,6 @@ func TestFilesystemFormatUnmarshalJSON(t *testing.T) {
}
}
func TestFilesystemFormatUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
format FilesystemFormat
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `"ext4"`},
out: out{format: FilesystemFormat("ext4")},
},
{
in: in{data: `"bad"`},
out: out{format: FilesystemFormat("bad"), err: ErrFilesystemInvalidFormat},
},
}
for i, test := range tests {
var format FilesystemFormat
err := yaml.Unmarshal([]byte(test.in.data), &format)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.format, format) {
t.Errorf("#%d: bad format: want %#v, got %#v", i, test.out.format, format)
}
}
}
func TestFilesystemFormatAssertValid(t *testing.T) {
type in struct {
format FilesystemFormat
@@ -119,7 +82,7 @@ func TestFilesystemFormatAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.format.assertValid()
err := test.in.format.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
@@ -157,37 +120,6 @@ func TestMkfsOptionsUnmarshalJSON(t *testing.T) {
}
}
func TestMkfsOptionsUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
options MkfsOptions
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `["--label=ROOT"]`},
out: out{options: MkfsOptions([]string{"--label=ROOT"})},
},
}
for i, test := range tests {
var options MkfsOptions
err := yaml.Unmarshal([]byte(test.in.data), &options)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.options, options) {
t.Errorf("#%d: bad device: want %#v, got %#v", i, test.out.options, options)
}
}
}
func TestFilesystemUnmarshalJSON(t *testing.T) {
type in struct {
data string
@@ -223,41 +155,6 @@ func TestFilesystemUnmarshalJSON(t *testing.T) {
}
}
func TestFilesystemUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
filesystem Filesystem
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: "mount:\n device: /foo\n format: ext4"},
out: out{filesystem: Filesystem{Mount: &FilesystemMount{Device: "/foo", Format: "ext4"}}},
},
{
in: in{data: "mount:\n format: ext4"},
out: out{err: ErrPathRelative},
},
}
for i, test := range tests {
var filesystem Filesystem
err := yaml.Unmarshal([]byte(test.in.data), &filesystem)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.filesystem, filesystem) {
t.Errorf("#%d: bad filesystem: want %#v, got %#v", i, test.out.filesystem, filesystem)
}
}
}
func TestFilesystemAssertValid(t *testing.T) {
type in struct {
filesystem Filesystem
@@ -283,15 +180,15 @@ func TestFilesystemAssertValid(t *testing.T) {
out: out{err: ErrPathRelative},
},
{
in: in{filesystem: Filesystem{Path: Path("/mount")}},
in: in{filesystem: Filesystem{Path: func(p Path) *Path { return &p }("/mount")}},
out: out{},
},
{
in: in{filesystem: Filesystem{Path: Path("mount")}},
in: in{filesystem: Filesystem{Path: func(p Path) *Path { return &p }("mount")}},
out: out{err: ErrPathRelative},
},
{
in: in{filesystem: Filesystem{Path: Path("/mount"), Mount: &FilesystemMount{Device: "/foo", Format: "ext4"}}},
in: in{filesystem: Filesystem{Path: func(p Path) *Path { return &p }("/mount"), Mount: &FilesystemMount{Device: "/foo", Format: "ext4"}}},
out: out{err: ErrFilesystemMountAndPath},
},
{
@@ -301,7 +198,7 @@ func TestFilesystemAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.filesystem.assertValid()
err := test.in.filesystem.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}

View File

@@ -15,8 +15,8 @@
package types
type Group struct {
Name string `json:"name,omitempty" yaml:"name"`
Gid *uint `json:"gid,omitempty" yaml:"gid"`
PasswordHash string `json:"passwordHash,omitempty" yaml:"password_hash"`
System bool `json:"system,omitempty" yaml:"system"`
Name string `json:"name,omitempty"`
Gid *uint `json:"gid,omitempty"`
PasswordHash string `json:"passwordHash,omitempty"`
System bool `json:"system,omitempty"`
}

View File

@@ -33,23 +33,9 @@ type Hash struct {
Sum string
}
func (h *Hash) UnmarshalYAML(unmarshal func(interface{}) error) error {
return h.unmarshal(unmarshal)
}
func (h *Hash) UnmarshalJSON(data []byte) error {
return h.unmarshal(func(th interface{}) error {
return json.Unmarshal(data, th)
})
}
func (h Hash) MarshalJSON() ([]byte, error) {
return []byte(`"` + h.Function + "-" + h.Sum + `"`), nil
}
func (h *Hash) unmarshal(unmarshal func(interface{}) error) error {
var th string
if err := unmarshal(&th); err != nil {
if err := json.Unmarshal(data, &th); err != nil {
return err
}
@@ -61,10 +47,14 @@ func (h *Hash) unmarshal(unmarshal func(interface{}) error) error {
h.Function = parts[0]
h.Sum = parts[1]
return h.assertValid()
return h.AssertValid()
}
func (h Hash) assertValid() error {
func (h Hash) MarshalJSON() ([]byte, error) {
return []byte(`"` + h.Function + "-" + h.Sum + `"`), nil
}
func (h Hash) AssertValid() error {
var hash crypto.Hash
switch h.Function {
case "sha512":

View File

@@ -18,8 +18,6 @@ import (
"encoding/json"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestHashUnmarshalJSON(t *testing.T) {
@@ -57,41 +55,6 @@ func TestHashUnmarshalJSON(t *testing.T) {
}
}
func TestHashUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
hash Hash
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef`},
out: out{hash: Hash{Function: "sha512", Sum: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}},
},
{
in: in{data: `xor01234567`},
out: out{err: ErrHashMalformed},
},
}
for i, test := range tests {
var hash Hash
err := yaml.Unmarshal([]byte(test.in.data), &hash)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.hash, hash) {
t.Errorf("#%d: bad hash: want %+v, got %+v", i, test.out.hash, hash)
}
}
}
func TestHashAssertValid(t *testing.T) {
type in struct {
hash Hash
@@ -123,7 +86,7 @@ func TestHashAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.hash.assertValid()
err := test.in.hash.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}

View File

@@ -27,45 +27,35 @@ var (
)
type Ignition struct {
Version IgnitionVersion `json:"version,omitempty" yaml:"version" merge:"old"`
Config IgnitionConfig `json:"config,omitempty" yaml:"config" merge:"new"`
Version IgnitionVersion `json:"version,omitempty" merge:"old"`
Config IgnitionConfig `json:"config,omitempty" merge:"new"`
}
type IgnitionConfig struct {
Append []ConfigReference `json:"append,omitempty" yaml:"append"`
Replace *ConfigReference `json:"replace,omitempty" yaml:"replace"`
Append []ConfigReference `json:"append,omitempty"`
Replace *ConfigReference `json:"replace,omitempty"`
}
type ConfigReference struct {
Source Url `json:"source,omitempty" yaml:"source"`
Verification Verification `json:"verification,omitempty" yaml:"verification"`
Source Url `json:"source,omitempty"`
Verification Verification `json:"verification,omitempty"`
}
type IgnitionVersion semver.Version
func (v *IgnitionVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
return v.unmarshal(unmarshal)
}
func (v *IgnitionVersion) UnmarshalJSON(data []byte) error {
return v.unmarshal(func(tv interface{}) error {
return json.Unmarshal(data, tv)
})
}
func (v IgnitionVersion) MarshalJSON() ([]byte, error) {
return semver.Version(v).MarshalJSON()
}
func (v *IgnitionVersion) unmarshal(unmarshal func(interface{}) error) error {
tv := semver.Version(*v)
if err := unmarshal(&tv); err != nil {
if err := json.Unmarshal(data, &tv); err != nil {
return err
}
*v = IgnitionVersion(tv)
return nil
}
func (v IgnitionVersion) MarshalJSON() ([]byte, error) {
return semver.Version(v).MarshalJSON()
}
func (v IgnitionVersion) AssertValid() error {
if MaxVersion.Major > v.Major {
return ErrOldVersion

View File

@@ -15,5 +15,5 @@
package types
type Networkd struct {
Units []NetworkdUnit `json:"units,omitempty" yaml:"units"`
Units []NetworkdUnit `json:"units,omitempty"`
}

View File

@@ -18,42 +18,29 @@ import (
"encoding/json"
"fmt"
"regexp"
"github.com/alecthomas/units"
)
type Partition struct {
Label PartitionLabel `json:"label,omitempty" yaml:"label"`
Number int `json:"number" yaml:"number"`
Size PartitionDimension `json:"size" yaml:"size"`
Start PartitionDimension `json:"start" yaml:"start"`
TypeGUID PartitionTypeGUID `json:"typeGuid,omitempty" yaml:"type_guid"`
Label PartitionLabel `json:"label,omitempty"`
Number int `json:"number"`
Size PartitionDimension `json:"size"`
Start PartitionDimension `json:"start"`
TypeGUID PartitionTypeGUID `json:"typeGuid,omitempty"`
}
type PartitionLabel string
func (n *PartitionLabel) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *PartitionLabel) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type partitionLabel PartitionLabel
func (n *PartitionLabel) unmarshal(unmarshal func(interface{}) error) error {
func (n *PartitionLabel) UnmarshalJSON(data []byte) error {
tn := partitionLabel(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = PartitionLabel(tn)
return n.assertValid()
return n.AssertValid()
}
func (n PartitionLabel) assertValid() error {
func (n PartitionLabel) AssertValid() error {
// http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries:
// 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units)
@@ -67,33 +54,7 @@ func (n PartitionLabel) assertValid() error {
type PartitionDimension uint64
func (n *PartitionDimension) UnmarshalYAML(unmarshal func(interface{}) error) error {
// In YAML we allow human-readable dimensions like GiB/TiB etc.
var str string
if err := unmarshal(&str); err != nil {
return err
}
b2b, err := units.ParseBase2Bytes(str) // TODO(vc): replace the units package
if err != nil {
return err
}
if b2b < 0 {
return fmt.Errorf("negative value inappropriate: %q", str)
}
// Translate bytes into sectors
sectors := (b2b / 512)
if b2b%512 != 0 {
sectors++
}
*n = PartitionDimension(uint64(sectors))
return nil
}
func (n *PartitionDimension) UnmarshalJSON(data []byte) error {
// In JSON we expect plain integral sectors.
// The YAML->JSON conversion is responsible for normalizing human units to sectors.
var pd uint64
if err := json.Unmarshal(data, &pd); err != nil {
return err
@@ -103,30 +64,19 @@ func (n *PartitionDimension) UnmarshalJSON(data []byte) error {
}
type PartitionTypeGUID string
func (d *PartitionTypeGUID) UnmarshalYAML(unmarshal func(interface{}) error) error {
return d.unmarshal(unmarshal)
}
func (d *PartitionTypeGUID) UnmarshalJSON(data []byte) error {
return d.unmarshal(func(td interface{}) error {
return json.Unmarshal(data, td)
})
}
type partitionTypeGUID PartitionTypeGUID
func (d *PartitionTypeGUID) unmarshal(unmarshal func(interface{}) error) error {
func (d *PartitionTypeGUID) UnmarshalJSON(data []byte) error {
td := partitionTypeGUID(*d)
if err := unmarshal(&td); err != nil {
if err := json.Unmarshal(data, &td); err != nil {
return err
}
*d = PartitionTypeGUID(td)
return d.assertValid()
return d.AssertValid()
}
func (d PartitionTypeGUID) assertValid() error {
ok, err := regexp.MatchString("[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}", string(d))
func (d PartitionTypeGUID) AssertValid() error {
ok, err := regexp.MatchString("^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$", string(d))
if err != nil {
return fmt.Errorf("error matching type-guid regexp: %v", err)
}

View File

@@ -15,6 +15,6 @@
package types
type Passwd struct {
Users []User `json:"users,omitempty" yaml:"users"`
Groups []Group `json:"groups,omitempty" yaml:"groups"`
Users []User `json:"users,omitempty"`
Groups []Group `json:"groups,omitempty"`
}

View File

@@ -25,33 +25,22 @@ var (
)
type Path string
func (p *Path) UnmarshalYAML(unmarshal func(interface{}) error) error {
return p.unmarshal(unmarshal)
}
type path Path
func (p *Path) UnmarshalJSON(data []byte) error {
return p.unmarshal(func(td interface{}) error {
return json.Unmarshal(data, td)
})
td := path(*p)
if err := json.Unmarshal(data, &td); err != nil {
return err
}
*p = Path(td)
return p.AssertValid()
}
func (p Path) MarshalJSON() ([]byte, error) {
return []byte(`"` + string(p) + `"`), nil
}
type path Path
func (p *Path) unmarshal(unmarshal func(interface{}) error) error {
td := path(*p)
if err := unmarshal(&td); err != nil {
return err
}
*p = Path(td)
return p.assertValid()
}
func (p Path) assertValid() error {
func (p Path) AssertValid() error {
if !filepath.IsAbs(string(p)) {
return ErrPathRelative
}

View File

@@ -18,8 +18,6 @@ import (
"encoding/json"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestPathUnmarshalJSON(t *testing.T) {
@@ -57,41 +55,6 @@ func TestPathUnmarshalJSON(t *testing.T) {
}
}
func TestPathUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
device Path
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `"/path"`},
out: out{device: Path("/path")},
},
{
in: in{data: `"bad"`},
out: out{device: Path("bad"), err: ErrPathRelative},
},
}
for i, test := range tests {
var device Path
err := yaml.Unmarshal([]byte(test.in.data), &device)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.device, device) {
t.Errorf("#%d: bad device: want %#v, got %#v", i, test.out.device, device)
}
}
}
func TestPathAssertValid(t *testing.T) {
type in struct {
device Path
@@ -127,7 +90,7 @@ func TestPathAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.device.assertValid()
err := test.in.device.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}

View File

@@ -20,34 +20,23 @@ import (
)
type Raid struct {
Name string `json:"name" yaml:"name"`
Level string `json:"level" yaml:"level"`
Devices []Path `json:"devices,omitempty" yaml:"devices"`
Spares int `json:"spares,omitempty" yaml:"spares"`
Name string `json:"name"`
Level string `json:"level"`
Devices []Path `json:"devices,omitempty"`
Spares int `json:"spares,omitempty"`
}
func (n *Raid) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *Raid) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type raid Raid
func (n *Raid) unmarshal(unmarshal func(interface{}) error) error {
func (n *Raid) UnmarshalJSON(data []byte) error {
tn := raid(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = Raid(tn)
return n.assertValid()
return n.AssertValid()
}
func (n Raid) assertValid() error {
func (n Raid) AssertValid() error {
switch n.Level {
case "linear", "raid0", "0", "stripe":
if n.Spares != 0 {

View File

@@ -15,8 +15,8 @@
package types
type Storage struct {
Disks []Disk `json:"disks,omitempty" yaml:"disks"`
Arrays []Raid `json:"raid,omitempty" yaml:"raid"`
Filesystems []Filesystem `json:"filesystems,omitempty" yaml:"filesystems"`
Files []File `json:"files,omitempty" yaml:"files"`
Disks []Disk `json:"disks,omitempty"`
Arrays []Raid `json:"raid,omitempty"`
Filesystems []Filesystem `json:"filesystems,omitempty"`
Files []File `json:"files,omitempty"`
}

View File

@@ -15,5 +15,5 @@
package types
type Systemd struct {
Units []SystemdUnit `json:"units,omitempty" yaml:"units"`
Units []SystemdUnit `json:"units,omitempty"`
}

View File

@@ -21,42 +21,31 @@ import (
)
type SystemdUnit struct {
Name SystemdUnitName `json:"name,omitempty" yaml:"name"`
Enable bool `json:"enable,omitempty" yaml:"enable"`
Mask bool `json:"mask,omitempty" yaml:"mask"`
Contents string `json:"contents,omitempty" yaml:"contents"`
DropIns []SystemdUnitDropIn `json:"dropins,omitempty" yaml:"dropins"`
Name SystemdUnitName `json:"name,omitempty"`
Enable bool `json:"enable,omitempty"`
Mask bool `json:"mask,omitempty"`
Contents string `json:"contents,omitempty"`
DropIns []SystemdUnitDropIn `json:"dropins,omitempty"`
}
type SystemdUnitDropIn struct {
Name SystemdUnitDropInName `json:"name,omitempty" yaml:"name"`
Contents string `json:"contents,omitempty" yaml:"contents"`
Name SystemdUnitDropInName `json:"name,omitempty"`
Contents string `json:"contents,omitempty"`
}
type SystemdUnitName string
func (n *SystemdUnitName) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *SystemdUnitName) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type systemdUnitName SystemdUnitName
func (n *SystemdUnitName) unmarshal(unmarshal func(interface{}) error) error {
func (n *SystemdUnitName) UnmarshalJSON(data []byte) error {
tn := systemdUnitName(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = SystemdUnitName(tn)
return n.assertValid()
return n.AssertValid()
}
func (n SystemdUnitName) assertValid() error {
func (n SystemdUnitName) AssertValid() error {
switch filepath.Ext(string(n)) {
case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope":
return nil
@@ -66,29 +55,18 @@ func (n SystemdUnitName) assertValid() error {
}
type SystemdUnitDropInName string
func (n *SystemdUnitDropInName) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *SystemdUnitDropInName) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type systemdUnitDropInName SystemdUnitDropInName
func (n *SystemdUnitDropInName) unmarshal(unmarshal func(interface{}) error) error {
func (n *SystemdUnitDropInName) UnmarshalJSON(data []byte) error {
tn := systemdUnitDropInName(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = SystemdUnitDropInName(tn)
return n.assertValid()
return n.AssertValid()
}
func (n SystemdUnitDropInName) assertValid() error {
func (n SystemdUnitDropInName) AssertValid() error {
switch filepath.Ext(string(n)) {
case ".conf":
return nil
@@ -98,34 +76,23 @@ func (n SystemdUnitDropInName) assertValid() error {
}
type NetworkdUnit struct {
Name NetworkdUnitName `json:"name,omitempty" yaml:"name"`
Contents string `json:"contents,omitempty" yaml:"contents"`
Name NetworkdUnitName `json:"name,omitempty"`
Contents string `json:"contents,omitempty"`
}
type NetworkdUnitName string
func (n *NetworkdUnitName) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *NetworkdUnitName) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type networkdUnitName NetworkdUnitName
func (n *NetworkdUnitName) unmarshal(unmarshal func(interface{}) error) error {
func (n *NetworkdUnitName) UnmarshalJSON(data []byte) error {
tn := networkdUnitName(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = NetworkdUnitName(tn)
return n.assertValid()
return n.AssertValid()
}
func (n NetworkdUnitName) assertValid() error {
func (n NetworkdUnitName) AssertValid() error {
switch filepath.Ext(string(n)) {
case ".link", ".netdev", ".network":
return nil

View File

@@ -19,8 +19,6 @@ import (
"errors"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestSystemdUnitNameUnmarshalJSON(t *testing.T) {
@@ -66,49 +64,6 @@ func TestSystemdUnitNameUnmarshalJSON(t *testing.T) {
}
}
func TestSystemdUnitNameUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
unit SystemdUnitName
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `"test.service"`},
out: out{unit: SystemdUnitName("test.service")},
},
{
in: in{data: `"test.socket"`},
out: out{unit: SystemdUnitName("test.socket")},
},
{
in: in{data: `"test.blah"`},
out: out{err: errors.New("invalid systemd unit extension")},
},
}
for i, test := range tests {
var unit SystemdUnitName
err := yaml.Unmarshal([]byte(test.in.data), &unit)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if err != nil {
continue
}
if !reflect.DeepEqual(test.out.unit, unit) {
t.Errorf("#%d: bad unit: want %#v, got %#v", i, test.out.unit, unit)
}
}
}
func TestNetworkdUnitNameUnmarshalJSON(t *testing.T) {
type in struct {
data string
@@ -155,50 +110,3 @@ func TestNetworkdUnitNameUnmarshalJSON(t *testing.T) {
}
}
}
func TestNetworkdUnitNameUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
unit NetworkdUnitName
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `"test.network"`},
out: out{unit: NetworkdUnitName("test.network")},
},
{
in: in{data: `"test.link"`},
out: out{unit: NetworkdUnitName("test.link")},
},
{
in: in{data: `"test.netdev"`},
out: out{unit: NetworkdUnitName("test.netdev")},
},
{
in: in{data: `"test.blah"`},
out: out{err: errors.New("invalid networkd unit extension")},
},
}
for i, test := range tests {
var unit NetworkdUnitName
err := yaml.Unmarshal([]byte(test.in.data), &unit)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if err != nil {
continue
}
if !reflect.DeepEqual(test.out.unit, unit) {
t.Errorf("#%d: bad unit: want %#v, got %#v", i, test.out.unit, unit)
}
}
}

View File

@@ -21,23 +21,9 @@ import (
type Url url.URL
func (u *Url) UnmarshalYAML(unmarshal func(interface{}) error) error {
return u.unmarshal(unmarshal)
}
func (u *Url) UnmarshalJSON(data []byte) error {
return u.unmarshal(func(tu interface{}) error {
return json.Unmarshal(data, tu)
})
}
func (u Url) MarshalJSON() ([]byte, error) {
return []byte(`"` + u.String() + `"`), nil
}
func (u *Url) unmarshal(unmarshal func(interface{}) error) error {
var tu string
if err := unmarshal(&tu); err != nil {
if err := json.Unmarshal(data, &tu); err != nil {
return err
}
@@ -46,6 +32,10 @@ func (u *Url) unmarshal(unmarshal func(interface{}) error) error {
return err
}
func (u Url) MarshalJSON() ([]byte, error) {
return []byte(`"` + u.String() + `"`), nil
}
func (u Url) String() string {
tu := url.URL(u)
return (&tu).String()

View File

@@ -15,21 +15,21 @@
package types
type User struct {
Name string `json:"name,omitempty" yaml:"name"`
PasswordHash string `json:"passwordHash,omitempty" yaml:"password_hash"`
SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty" yaml:"ssh_authorized_keys"`
Create *UserCreate `json:"create,omitempty" yaml:"create"`
Name string `json:"name,omitempty"`
PasswordHash string `json:"passwordHash,omitempty"`
SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"`
Create *UserCreate `json:"create,omitempty"`
}
type UserCreate struct {
Uid *uint `json:"uid,omitempty" yaml:"uid"`
GECOS string `json:"gecos,omitempty" yaml:"gecos"`
Homedir string `json:"homeDir,omitempty" yaml:"home_dir"`
NoCreateHome bool `json:"noCreateHome,omitempty" yaml:"no_create_home"`
PrimaryGroup string `json:"primaryGroup,omitempty" yaml:"primary_group"`
Groups []string `json:"groups,omitempty" yaml:"groups"`
NoUserGroup bool `json:"noUserGroup,omitempty" yaml:"no_user_group"`
System bool `json:"system,omitempty" yaml:"system"`
NoLogInit bool `json:"noLogInit,omitempty" yaml:"no_log_init"`
Shell string `json:"shell,omitempty" yaml:"shell"`
Uid *uint `json:"uid,omitempty"`
GECOS string `json:"gecos,omitempty"`
Homedir string `json:"homeDir,omitempty"`
NoCreateHome bool `json:"noCreateHome,omitempty"`
PrimaryGroup string `json:"primaryGroup,omitempty"`
Groups []string `json:"groups,omitempty"`
NoUserGroup bool `json:"noUserGroup,omitempty"`
System bool `json:"system,omitempty"`
NoLogInit bool `json:"noLogInit,omitempty"`
Shell string `json:"shell,omitempty"`
}

View File

@@ -15,5 +15,5 @@
package types
type Verification struct {
Hash *Hash `json:"hash,omitempty" yaml:"hash"`
Hash *Hash `json:"hash,omitempty"`
}

View File

@@ -14,14 +14,60 @@
package types
import (
"reflect"
)
const (
Version = 1
)
type Config struct {
Version int `json:"ignitionVersion" yaml:"ignition_version"`
Storage Storage `json:"storage,omitempty" yaml:"storage"`
Systemd Systemd `json:"systemd,omitempty" yaml:"systemd"`
Networkd Networkd `json:"networkd,omitempty" yaml:"networkd"`
Passwd Passwd `json:"passwd,omitempty" yaml:"passwd"`
Version int `json:"ignitionVersion"`
Storage Storage `json:"storage,omitempty"`
Systemd Systemd `json:"systemd,omitempty"`
Networkd Networkd `json:"networkd,omitempty"`
Passwd Passwd `json:"passwd,omitempty"`
}
func (c Config) AssertValid() error {
return assertStructValid(reflect.ValueOf(c))
}
func assertValid(vObj reflect.Value) error {
if !vObj.IsValid() {
return nil
}
if obj, ok := vObj.Interface().(interface {
AssertValid() error
}); ok && !(vObj.Kind() == reflect.Ptr && vObj.IsNil()) {
if err := obj.AssertValid(); err != nil {
return err
}
}
switch vObj.Kind() {
case reflect.Ptr:
return assertValid(vObj.Elem())
case reflect.Struct:
return assertStructValid(vObj)
case reflect.Slice:
for i := 0; i < vObj.Len(); i++ {
if err := assertValid(vObj.Index(i)); err != nil {
return err
}
}
}
return nil
}
func assertStructValid(vObj reflect.Value) error {
for i := 0; i < vObj.Type().NumField(); i++ {
if err := assertValid(vObj.Field(i)); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,51 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"errors"
"reflect"
"testing"
)
func TestAssertValid(t *testing.T) {
type in struct {
cfg Config
}
type out struct {
err error
}
tests := []struct {
in in
out out
}{
{
in: in{cfg: Config{}},
out: out{},
},
{
in: in{cfg: Config{Systemd: Systemd{Units: []SystemdUnit{{Name: "foo.bar"}}}}},
out: out{err: errors.New("invalid systemd unit extension")},
},
}
for i, test := range tests {
err := test.in.cfg.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
}
}

View File

@@ -20,44 +20,22 @@ import (
)
type Disk struct {
Device Path `json:"device,omitempty" yaml:"device"`
WipeTable bool `json:"wipeTable,omitempty" yaml:"wipe_table"`
Partitions []Partition `json:"partitions,omitempty" yaml:"partitions"`
Device Path `json:"device,omitempty"`
WipeTable bool `json:"wipeTable,omitempty"`
Partitions []Partition `json:"partitions,omitempty"`
}
func (n *Disk) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := n.unmarshal(unmarshal); err != nil {
return err
}
if err := n.preparePartitions(); err != nil {
return err
}
return n.assertValid()
}
func (n *Disk) UnmarshalJSON(data []byte) error {
err := n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
if err != nil {
return err
}
return n.assertValid()
}
type disk Disk
func (n *Disk) unmarshal(unmarshal func(interface{}) error) error {
func (n *Disk) UnmarshalJSON(data []byte) error {
tn := disk(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = Disk(tn)
return nil
return n.AssertValid()
}
func (n Disk) assertValid() error {
// This applies to YAML (post-prepare) and JSON unmarshals equally:
func (n Disk) AssertValid() error {
if len(n.Device) == 0 {
return fmt.Errorf("disk device is required")
}
@@ -140,28 +118,3 @@ func (n Disk) partitionsMisaligned() bool {
}
return false
}
// preparePartitions performs some checks and potentially adjusts the partitions for alignment.
// This is only invoked when unmarshalling YAML, since there we parse human-friendly units.
func (n *Disk) preparePartitions() error {
// On the YAML side we accept human-friendly units which may require massaging.
// align partition starts
for i := range n.Partitions {
// skip automatically placed partitions
if n.Partitions[i].Start == 0 {
continue
}
// keep partitions out of the first 2048 sectors
if n.Partitions[i].Start < 2048 {
n.Partitions[i].Start = 2048
}
// toss the bottom 11 bits
n.Partitions[i].Start &= ^PartitionDimension(2048 - 1)
}
// TODO(vc): may be interesting to do something about potentially overlapping partitions
return nil
}

View File

@@ -27,35 +27,24 @@ var (
type FileMode os.FileMode
type File struct {
Path Path `json:"path,omitempty" yaml:"path"`
Contents string `json:"contents,omitempty" yaml:"contents"`
Mode FileMode `json:"mode,omitempty" yaml:"mode"`
Uid int `json:"uid,omitempty" yaml:"uid"`
Gid int `json:"gid,omitempty" yaml:"gid"`
Path Path `json:"path,omitempty"`
Contents string `json:"contents,omitempty"`
Mode FileMode `json:"mode,omitempty"`
Uid int `json:"uid,omitempty"`
Gid int `json:"gid,omitempty"`
}
func (m *FileMode) UnmarshalYAML(unmarshal func(interface{}) error) error {
return m.unmarshal(unmarshal)
}
func (m *FileMode) UnmarshalJSON(data []byte) error {
return m.unmarshal(func(tm interface{}) error {
return json.Unmarshal(data, tm)
})
}
type fileMode FileMode
func (m *FileMode) unmarshal(unmarshal func(interface{}) error) error {
func (m *FileMode) UnmarshalJSON(data []byte) error {
tm := fileMode(*m)
if err := unmarshal(&tm); err != nil {
if err := json.Unmarshal(data, &tm); err != nil {
return err
}
*m = FileMode(tm)
return m.assertValid()
return m.AssertValid()
}
func (m FileMode) assertValid() error {
func (m FileMode) AssertValid() error {
if (m &^ 07777) != 0 {
return ErrFileIllegalMode
}

View File

@@ -18,8 +18,6 @@ import (
"encoding/json"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestFileModeUnmarshalJSON(t *testing.T) {
@@ -57,45 +55,6 @@ func TestFileModeUnmarshalJSON(t *testing.T) {
}
}
func TestFileModeUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
mode FileMode
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `0644`},
out: out{mode: FileMode(0644)},
},
{
in: in{data: `0420`},
out: out{mode: FileMode(0420)},
},
{
in: in{data: `017777`},
out: out{mode: FileMode(017777), err: ErrFileIllegalMode},
},
}
for i, test := range tests {
var mode FileMode
err := yaml.Unmarshal([]byte(test.in.data), &mode)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.mode, mode) {
t.Errorf("#%d: bad mode: want %#o, got %#o", i, test.out.mode, mode)
}
}
}
func TestFileAssertValid(t *testing.T) {
type in struct {
mode FileMode
@@ -131,7 +90,7 @@ func TestFileAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.mode.assertValid()
err := test.in.mode.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}

View File

@@ -24,72 +24,50 @@ var (
)
type Filesystem struct {
Device Path `json:"device,omitempty" yaml:"device"`
Format FilesystemFormat `json:"format,omitempty" yaml:"format"`
Create *FilesystemCreate `json:"create,omitempty" yaml:"create"`
Files []File `json:"files,omitempty" yaml:"files"`
Device Path `json:"device,omitempty"`
Format FilesystemFormat `json:"format,omitempty"`
Create *FilesystemCreate `json:"create,omitempty"`
Files []File `json:"files,omitempty"`
}
type FilesystemCreate struct {
Force bool `json:"force,omitempty" yaml:"force"`
Options MkfsOptions `json:"options,omitempty" yaml:"options"`
Force bool `json:"force,omitempty"`
Options MkfsOptions `json:"options,omitempty"`
}
func (f *Filesystem) UnmarshalYAML(unmarshal func(interface{}) error) error {
return f.unmarshal(unmarshal)
}
func (f *Filesystem) UnmarshalJSON(data []byte) error {
return f.unmarshal(func(tf interface{}) error {
return json.Unmarshal(data, tf)
})
}
type filesystem Filesystem
func (f *Filesystem) unmarshal(unmarshal func(interface{}) error) error {
func (f *Filesystem) UnmarshalJSON(data []byte) error {
tf := filesystem(*f)
if err := unmarshal(&tf); err != nil {
if err := json.Unmarshal(data, &tf); err != nil {
return err
}
*f = Filesystem(tf)
return f.assertValid()
return f.AssertValid()
}
func (f Filesystem) assertValid() error {
if err := f.Device.assertValid(); err != nil {
func (f Filesystem) AssertValid() error {
if err := f.Device.AssertValid(); err != nil {
return err
}
if err := f.Format.assertValid(); err != nil {
if err := f.Format.AssertValid(); err != nil {
return err
}
return nil
}
type FilesystemFormat string
func (f *FilesystemFormat) UnmarshalYAML(unmarshal func(interface{}) error) error {
return f.unmarshal(unmarshal)
}
func (f *FilesystemFormat) UnmarshalJSON(data []byte) error {
return f.unmarshal(func(tf interface{}) error {
return json.Unmarshal(data, tf)
})
}
type filesystemFormat FilesystemFormat
func (f *FilesystemFormat) unmarshal(unmarshal func(interface{}) error) error {
func (f *FilesystemFormat) UnmarshalJSON(data []byte) error {
tf := filesystemFormat(*f)
if err := unmarshal(&tf); err != nil {
if err := json.Unmarshal(data, &tf); err != nil {
return err
}
*f = FilesystemFormat(tf)
return f.assertValid()
return f.AssertValid()
}
func (f FilesystemFormat) assertValid() error {
func (f FilesystemFormat) AssertValid() error {
switch f {
case "ext4", "btrfs", "xfs":
return nil
@@ -99,28 +77,17 @@ func (f FilesystemFormat) assertValid() error {
}
type MkfsOptions []string
func (o *MkfsOptions) UnmarshalYAML(unmarshal func(interface{}) error) error {
return o.unmarshal(unmarshal)
}
func (o *MkfsOptions) UnmarshalJSON(data []byte) error {
return o.unmarshal(func(to interface{}) error {
return json.Unmarshal(data, to)
})
}
type mkfsOptions MkfsOptions
func (o *MkfsOptions) unmarshal(unmarshal func(interface{}) error) error {
func (o *MkfsOptions) UnmarshalJSON(data []byte) error {
to := mkfsOptions(*o)
if err := unmarshal(&to); err != nil {
if err := json.Unmarshal(data, &to); err != nil {
return err
}
*o = MkfsOptions(to)
return o.assertValid()
return o.AssertValid()
}
func (o MkfsOptions) assertValid() error {
func (o MkfsOptions) AssertValid() error {
return nil
}

View File

@@ -18,8 +18,6 @@ import (
"encoding/json"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestFilesystemFormatUnmarshalJSON(t *testing.T) {
@@ -57,41 +55,6 @@ func TestFilesystemFormatUnmarshalJSON(t *testing.T) {
}
}
func TestFilesystemFormatUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
format FilesystemFormat
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `"ext4"`},
out: out{format: FilesystemFormat("ext4")},
},
{
in: in{data: `"bad"`},
out: out{format: FilesystemFormat("bad"), err: ErrFilesystemInvalidFormat},
},
}
for i, test := range tests {
var format FilesystemFormat
err := yaml.Unmarshal([]byte(test.in.data), &format)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.format, format) {
t.Errorf("#%d: bad format: want %#v, got %#v", i, test.out.format, format)
}
}
}
func TestFilesystemFormatAssertValid(t *testing.T) {
type in struct {
format FilesystemFormat
@@ -119,7 +82,7 @@ func TestFilesystemFormatAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.format.assertValid()
err := test.in.format.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
@@ -157,37 +120,6 @@ func TestMkfsOptionsUnmarshalJSON(t *testing.T) {
}
}
func TestMkfsOptionsUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
options MkfsOptions
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `["--label=ROOT"]`},
out: out{options: MkfsOptions([]string{"--label=ROOT"})},
},
}
for i, test := range tests {
var options MkfsOptions
err := yaml.Unmarshal([]byte(test.in.data), &options)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.options, options) {
t.Errorf("#%d: bad device: want %#v, got %#v", i, test.out.options, options)
}
}
}
func TestFilesystemUnmarshalJSON(t *testing.T) {
type in struct {
data string
@@ -223,41 +155,6 @@ func TestFilesystemUnmarshalJSON(t *testing.T) {
}
}
func TestFilesystemUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
filesystem Filesystem
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: "device: /foo\nformat: ext4"},
out: out{filesystem: Filesystem{Device: "/foo", Format: "ext4"}},
},
{
in: in{data: "format: ext4"},
out: out{filesystem: Filesystem{Format: "ext4"}, err: ErrPathRelative},
},
}
for i, test := range tests {
var filesystem Filesystem
err := yaml.Unmarshal([]byte(test.in.data), &filesystem)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.filesystem, filesystem) {
t.Errorf("#%d: bad filesystem: want %#v, got %#v", i, test.out.filesystem, filesystem)
}
}
}
func TestFilesystemAssertValid(t *testing.T) {
type in struct {
filesystem Filesystem
@@ -289,7 +186,7 @@ func TestFilesystemAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.filesystem.assertValid()
err := test.in.filesystem.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}

View File

@@ -15,8 +15,8 @@
package types
type Group struct {
Name string `json:"name,omitempty" yaml:"name"`
Gid *uint `json:"gid,omitempty" yaml:"gid"`
PasswordHash string `json:"passwordHash,omitempty" yaml:"password_hash"`
System bool `json:"system,omitempty" yaml:"system"`
Name string `json:"name,omitempty"`
Gid *uint `json:"gid,omitempty"`
PasswordHash string `json:"passwordHash,omitempty"`
System bool `json:"system,omitempty"`
}

View File

@@ -15,5 +15,5 @@
package types
type Networkd struct {
Units []NetworkdUnit `json:"units,omitempty" yaml:"units"`
Units []NetworkdUnit `json:"units,omitempty"`
}

View File

@@ -18,42 +18,29 @@ import (
"encoding/json"
"fmt"
"regexp"
"github.com/alecthomas/units"
)
type Partition struct {
Label PartitionLabel `json:"label,omitempty" yaml:"label"`
Number int `json:"number" yaml:"number"`
Size PartitionDimension `json:"size" yaml:"size"`
Start PartitionDimension `json:"start" yaml:"start"`
TypeGUID PartitionTypeGUID `json:"typeGuid,omitempty" yaml:"type_guid"`
Label PartitionLabel `json:"label,omitempty"`
Number int `json:"number"`
Size PartitionDimension `json:"size"`
Start PartitionDimension `json:"start"`
TypeGUID PartitionTypeGUID `json:"typeGuid,omitempty"`
}
type PartitionLabel string
func (n *PartitionLabel) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *PartitionLabel) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type partitionLabel PartitionLabel
func (n *PartitionLabel) unmarshal(unmarshal func(interface{}) error) error {
func (n *PartitionLabel) UnmarshalJSON(data []byte) error {
tn := partitionLabel(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = PartitionLabel(tn)
return n.assertValid()
return n.AssertValid()
}
func (n PartitionLabel) assertValid() error {
func (n PartitionLabel) AssertValid() error {
// http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries:
// 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units)
@@ -67,33 +54,7 @@ func (n PartitionLabel) assertValid() error {
type PartitionDimension uint64
func (n *PartitionDimension) UnmarshalYAML(unmarshal func(interface{}) error) error {
// In YAML we allow human-readable dimensions like GiB/TiB etc.
var str string
if err := unmarshal(&str); err != nil {
return err
}
b2b, err := units.ParseBase2Bytes(str) // TODO(vc): replace the units package
if err != nil {
return err
}
if b2b < 0 {
return fmt.Errorf("negative value inappropriate: %q", str)
}
// Translate bytes into sectors
sectors := (b2b / 512)
if b2b%512 != 0 {
sectors++
}
*n = PartitionDimension(uint64(sectors))
return nil
}
func (n *PartitionDimension) UnmarshalJSON(data []byte) error {
// In JSON we expect plain integral sectors.
// The YAML->JSON conversion is responsible for normalizing human units to sectors.
var pd uint64
if err := json.Unmarshal(data, &pd); err != nil {
return err
@@ -103,30 +64,19 @@ func (n *PartitionDimension) UnmarshalJSON(data []byte) error {
}
type PartitionTypeGUID string
func (d *PartitionTypeGUID) UnmarshalYAML(unmarshal func(interface{}) error) error {
return d.unmarshal(unmarshal)
}
func (d *PartitionTypeGUID) UnmarshalJSON(data []byte) error {
return d.unmarshal(func(td interface{}) error {
return json.Unmarshal(data, td)
})
}
type partitionTypeGUID PartitionTypeGUID
func (d *PartitionTypeGUID) unmarshal(unmarshal func(interface{}) error) error {
func (d *PartitionTypeGUID) UnmarshalJSON(data []byte) error {
td := partitionTypeGUID(*d)
if err := unmarshal(&td); err != nil {
if err := json.Unmarshal(data, &td); err != nil {
return err
}
*d = PartitionTypeGUID(td)
return d.assertValid()
return d.AssertValid()
}
func (d PartitionTypeGUID) assertValid() error {
ok, err := regexp.MatchString("[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}", string(d))
func (d PartitionTypeGUID) AssertValid() error {
ok, err := regexp.MatchString("^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$", string(d))
if err != nil {
return fmt.Errorf("error matching type-guid regexp: %v", err)
}

View File

@@ -15,6 +15,6 @@
package types
type Passwd struct {
Users []User `json:"users,omitempty" yaml:"users"`
Groups []Group `json:"groups,omitempty" yaml:"groups"`
Users []User `json:"users,omitempty"`
Groups []Group `json:"groups,omitempty"`
}

View File

@@ -25,29 +25,18 @@ var (
)
type Path string
func (d *Path) UnmarshalYAML(unmarshal func(interface{}) error) error {
return d.unmarshal(unmarshal)
}
func (d *Path) UnmarshalJSON(data []byte) error {
return d.unmarshal(func(td interface{}) error {
return json.Unmarshal(data, td)
})
}
type path Path
func (d *Path) unmarshal(unmarshal func(interface{}) error) error {
func (d *Path) UnmarshalJSON(data []byte) error {
td := path(*d)
if err := unmarshal(&td); err != nil {
if err := json.Unmarshal(data, &td); err != nil {
return err
}
*d = Path(td)
return d.assertValid()
return d.AssertValid()
}
func (d Path) assertValid() error {
func (d Path) AssertValid() error {
if !filepath.IsAbs(string(d)) {
return ErrPathRelative
}

View File

@@ -18,8 +18,6 @@ import (
"encoding/json"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestPathUnmarshalJSON(t *testing.T) {
@@ -57,41 +55,6 @@ func TestPathUnmarshalJSON(t *testing.T) {
}
}
func TestPathUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
device Path
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `"/path"`},
out: out{device: Path("/path")},
},
{
in: in{data: `"bad"`},
out: out{device: Path("bad"), err: ErrPathRelative},
},
}
for i, test := range tests {
var device Path
err := yaml.Unmarshal([]byte(test.in.data), &device)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if !reflect.DeepEqual(test.out.device, device) {
t.Errorf("#%d: bad device: want %#v, got %#v", i, test.out.device, device)
}
}
}
func TestPathAssertValid(t *testing.T) {
type in struct {
device Path
@@ -127,7 +90,7 @@ func TestPathAssertValid(t *testing.T) {
}
for i, test := range tests {
err := test.in.device.assertValid()
err := test.in.device.AssertValid()
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}

View File

@@ -20,34 +20,23 @@ import (
)
type Raid struct {
Name string `json:"name" yaml:"name"`
Level string `json:"level" yaml:"level"`
Devices []Path `json:"devices,omitempty" yaml:"devices"`
Spares int `json:"spares,omitempty" yaml:"spares"`
Name string `json:"name"`
Level string `json:"level"`
Devices []Path `json:"devices,omitempty"`
Spares int `json:"spares,omitempty"`
}
func (n *Raid) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *Raid) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type raid Raid
func (n *Raid) unmarshal(unmarshal func(interface{}) error) error {
func (n *Raid) UnmarshalJSON(data []byte) error {
tn := raid(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = Raid(tn)
return n.assertValid()
return n.AssertValid()
}
func (n Raid) assertValid() error {
func (n Raid) AssertValid() error {
switch n.Level {
case "linear", "raid0", "0", "stripe":
if n.Spares != 0 {

View File

@@ -15,7 +15,7 @@
package types
type Storage struct {
Disks []Disk `json:"disks,omitempty" yaml:"disks"`
Arrays []Raid `json:"raid,omitempty" yaml:"raid"`
Filesystems []Filesystem `json:"filesystems,omitempty" yaml:"filesystems"`
Disks []Disk `json:"disks,omitempty"`
Arrays []Raid `json:"raid,omitempty"`
Filesystems []Filesystem `json:"filesystems,omitempty"`
}

View File

@@ -15,5 +15,5 @@
package types
type Systemd struct {
Units []SystemdUnit `json:"units,omitempty" yaml:"units"`
Units []SystemdUnit `json:"units,omitempty"`
}

View File

@@ -21,42 +21,31 @@ import (
)
type SystemdUnit struct {
Name SystemdUnitName `json:"name,omitempty" yaml:"name"`
Enable bool `json:"enable,omitempty" yaml:"enable"`
Mask bool `json:"mask,omitempty" yaml:"mask"`
Contents string `json:"contents,omitempty" yaml:"contents"`
DropIns []SystemdUnitDropIn `json:"dropins,omitempty" yaml:"dropins"`
Name SystemdUnitName `json:"name,omitempty"`
Enable bool `json:"enable,omitempty"`
Mask bool `json:"mask,omitempty"`
Contents string `json:"contents,omitempty"`
DropIns []SystemdUnitDropIn `json:"dropins,omitempty"`
}
type SystemdUnitDropIn struct {
Name SystemdUnitDropInName `json:"name,omitempty" yaml:"name"`
Contents string `json:"contents,omitempty" yaml:"contents"`
Name SystemdUnitDropInName `json:"name,omitempty"`
Contents string `json:"contents,omitempty"`
}
type SystemdUnitName string
func (n *SystemdUnitName) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *SystemdUnitName) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type systemdUnitName SystemdUnitName
func (n *SystemdUnitName) unmarshal(unmarshal func(interface{}) error) error {
func (n *SystemdUnitName) UnmarshalJSON(data []byte) error {
tn := systemdUnitName(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = SystemdUnitName(tn)
return n.assertValid()
return n.AssertValid()
}
func (n SystemdUnitName) assertValid() error {
func (n SystemdUnitName) AssertValid() error {
switch filepath.Ext(string(n)) {
case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope":
return nil
@@ -66,29 +55,18 @@ func (n SystemdUnitName) assertValid() error {
}
type SystemdUnitDropInName string
func (n *SystemdUnitDropInName) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *SystemdUnitDropInName) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type systemdUnitDropInName SystemdUnitDropInName
func (n *SystemdUnitDropInName) unmarshal(unmarshal func(interface{}) error) error {
func (n *SystemdUnitDropInName) UnmarshalJSON(data []byte) error {
tn := systemdUnitDropInName(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = SystemdUnitDropInName(tn)
return n.assertValid()
return n.AssertValid()
}
func (n SystemdUnitDropInName) assertValid() error {
func (n SystemdUnitDropInName) AssertValid() error {
switch filepath.Ext(string(n)) {
case ".conf":
return nil
@@ -98,34 +76,23 @@ func (n SystemdUnitDropInName) assertValid() error {
}
type NetworkdUnit struct {
Name NetworkdUnitName `json:"name,omitempty" yaml:"name"`
Contents string `json:"contents,omitempty" yaml:"contents"`
Name NetworkdUnitName `json:"name,omitempty"`
Contents string `json:"contents,omitempty"`
}
type NetworkdUnitName string
func (n *NetworkdUnitName) UnmarshalYAML(unmarshal func(interface{}) error) error {
return n.unmarshal(unmarshal)
}
func (n *NetworkdUnitName) UnmarshalJSON(data []byte) error {
return n.unmarshal(func(tn interface{}) error {
return json.Unmarshal(data, tn)
})
}
type networkdUnitName NetworkdUnitName
func (n *NetworkdUnitName) unmarshal(unmarshal func(interface{}) error) error {
func (n *NetworkdUnitName) UnmarshalJSON(data []byte) error {
tn := networkdUnitName(*n)
if err := unmarshal(&tn); err != nil {
if err := json.Unmarshal(data, &tn); err != nil {
return err
}
*n = NetworkdUnitName(tn)
return n.assertValid()
return n.AssertValid()
}
func (n NetworkdUnitName) assertValid() error {
func (n NetworkdUnitName) AssertValid() error {
switch filepath.Ext(string(n)) {
case ".link", ".netdev", ".network":
return nil

View File

@@ -19,8 +19,6 @@ import (
"errors"
"reflect"
"testing"
"github.com/go-yaml/yaml"
)
func TestSystemdUnitNameUnmarshalJSON(t *testing.T) {
@@ -66,49 +64,6 @@ func TestSystemdUnitNameUnmarshalJSON(t *testing.T) {
}
}
func TestSystemdUnitNameUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
unit SystemdUnitName
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `"test.service"`},
out: out{unit: SystemdUnitName("test.service")},
},
{
in: in{data: `"test.socket"`},
out: out{unit: SystemdUnitName("test.socket")},
},
{
in: in{data: `"test.blah"`},
out: out{err: errors.New("invalid systemd unit extension")},
},
}
for i, test := range tests {
var unit SystemdUnitName
err := yaml.Unmarshal([]byte(test.in.data), &unit)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if err != nil {
continue
}
if !reflect.DeepEqual(test.out.unit, unit) {
t.Errorf("#%d: bad unit: want %#v, got %#v", i, test.out.unit, unit)
}
}
}
func TestNetworkdUnitNameUnmarshalJSON(t *testing.T) {
type in struct {
data string
@@ -155,50 +110,3 @@ func TestNetworkdUnitNameUnmarshalJSON(t *testing.T) {
}
}
}
func TestNetworkdUnitNameUnmarshalYAML(t *testing.T) {
type in struct {
data string
}
type out struct {
unit NetworkdUnitName
err error
}
tests := []struct {
in in
out out
}{
{
in: in{data: `"test.network"`},
out: out{unit: NetworkdUnitName("test.network")},
},
{
in: in{data: `"test.link"`},
out: out{unit: NetworkdUnitName("test.link")},
},
{
in: in{data: `"test.netdev"`},
out: out{unit: NetworkdUnitName("test.netdev")},
},
{
in: in{data: `"test.blah"`},
out: out{err: errors.New("invalid networkd unit extension")},
},
}
for i, test := range tests {
var unit NetworkdUnitName
err := yaml.Unmarshal([]byte(test.in.data), &unit)
if !reflect.DeepEqual(test.out.err, err) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}
if err != nil {
continue
}
if !reflect.DeepEqual(test.out.unit, unit) {
t.Errorf("#%d: bad unit: want %#v, got %#v", i, test.out.unit, unit)
}
}
}

View File

@@ -15,21 +15,21 @@
package types
type User struct {
Name string `json:"name,omitempty" yaml:"name"`
PasswordHash string `json:"passwordHash,omitempty" yaml:"password_hash"`
SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty" yaml:"ssh_authorized_keys"`
Create *UserCreate `json:"create,omitempty" yaml:"create"`
Name string `json:"name,omitempty"`
PasswordHash string `json:"passwordHash,omitempty"`
SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"`
Create *UserCreate `json:"create,omitempty"`
}
type UserCreate struct {
Uid *uint `json:"uid,omitempty" yaml:"uid"`
GECOS string `json:"gecos,omitempty" yaml:"gecos"`
Homedir string `json:"homeDir,omitempty" yaml:"home_dir"`
NoCreateHome bool `json:"noCreateHome,omitempty" yaml:"no_create_home"`
PrimaryGroup string `json:"primaryGroup,omitempty" yaml:"primary_group"`
Groups []string `json:"groups,omitempty" yaml:"groups"`
NoUserGroup bool `json:"noUserGroup,omitempty" yaml:"no_user_group"`
System bool `json:"system,omitempty" yaml:"system"`
NoLogInit bool `json:"noLogInit,omitempty" yaml:"no_log_init"`
Shell string `json:"shell,omitempty" yaml:"shell"`
Uid *uint `json:"uid,omitempty"`
GECOS string `json:"gecos,omitempty"`
Homedir string `json:"homeDir,omitempty"`
NoCreateHome bool `json:"noCreateHome,omitempty"`
PrimaryGroup string `json:"primaryGroup,omitempty"`
Groups []string `json:"groups,omitempty"`
NoUserGroup bool `json:"noUserGroup,omitempty"`
System bool `json:"system,omitempty"`
NoLogInit bool `json:"noLogInit,omitempty"`
Shell string `json:"shell,omitempty"`
}

View File

@@ -1,5 +1,3 @@
# If you manipulate the contents of third_party/, amend this accordingly.
# pkg version
github.com/alecthomas/units 6b4e7dc5e3143b85ea77909c72caf89416fc2915
github.com/camlistore/camlistore/pkg/errorutil 9106ce829629773474c689b34aacd7d3aaa99426
github.com/go-yaml/yaml 49c95bdc21843256fb6c4e0d370a05f24a0bf213

View File

@@ -1,7 +1,5 @@
# If you manipulate the contents of vendor/, amend this accordingly.
# pkg version
github.com/alecthomas/units 6b4e7dc5e3143b85ea77909c72caf89416fc2915
github.com/coreos/go-semver 294930c1e79c64e7dbe360054274fdad492c8cf5
github.com/go-yaml/yaml 49c95bdc21843256fb6c4e0d370a05f24a0bf213
github.com/vincent-petithory/dataurl 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
go4.org/errorutil 03efcb870d84809319ea509714dd6d19a1498483

View File

@@ -6,11 +6,11 @@ The Ignition configuration is a JSON document conforming to the following specif
* **version** (string): the semantic version number of the spec. The spec version must be compatible with the latest version (`2.0.0`). Compatibility requires the major versions to match and the spec version be less than or equal to the latest version.
* **_config_** (objects): options related to the configuration.
* **_append_** (list of objects): a list of the configs to be appended to the current config.
* **source** (string): the URL of the config. Supported schemes are http. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **source** (string): the URL of the config. Supported schemes are http and https. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **_verification_** (object): options related to the verification of the config.
* **_hash_** (string): the hash of the config, in the form `<type>-<value>` where type is sha512.
* **_replace_** (object): the config that will replace the current.
* **source** (string): the URL of the config. Supported schemes are http. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **source** (string): the URL of the config. Supported schemes are http and https. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **_verification_** (object): options related to the verification of the config.
* **_hash_** (string): the hash of the config, in the form `<type>-<value>` where type is sha512.
* **_storage_** (object): describes the desired state of the system's storage devices.
@@ -42,7 +42,7 @@ The Ignition configuration is a JSON document conforming to the following specif
* **path** (string): the absolute path to the file.
* **_contents_** (object): options related to the contents of the file.
* **_compression_** (string): the type of compression used on the contents (null or gzip)
* **_source_** (string): the URL of the file contents. Supported schemes are http and [data][rfc2397]. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **_source_** (string): the URL of the file contents. Supported schemes are http, https, and [data][rfc2397]. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* **_verification_** (object): options related to the verification of the file contents.
* **_hash_** (string): the hash of the config, in the form `<type>-<value>` where type is sha512.
* **_mode_** (integer): the file's permission mode. Note that the mode must be properly specified as a **decimal** value (i.e. 0644 -> 420).

View File

@@ -34,6 +34,6 @@ In the event that this doesn't yield any results, running as root may help. Ther
In the vast majority of cases, it will be immediately obvious why Ignition failed. If it's not, inspect the config that Ignition wrote into the log. This shows how Ignition interpreted the supplied configuration. The user-provided config may have a misspelled section or maybe an incorrect hierarchy.
[configspec]: configuration.md
[examples]: https://github.com/coreos/docs/blob/master/ignition/examples.md
[examples]: examples.md
[platforms]: supported-platforms.md
[troubleshooting]: #troubleshooting

View File

@@ -4,9 +4,10 @@ Ignition is currently only supported for the following platforms:
* [Bare Metal] - Use the `coreos.config.url` kernel parameter to provide a URL to the configuration. The URL can use the `http://` scheme to specify a remote config or the `oem://` scheme to specify a local config, rooted in `/usr/share/oem`.
* [PXE] - Use the `coreos.config.url` and `coreos.first_boot=1` (**in case of the very first PXE boot only**) kernel parameters to provide a URL to the configuration. The URL can use the `http://` scheme to specify a remote config or the `oem://` scheme to specify a local config, rooted in `/usr/share/oem`.
* [Amazon EC2] - Ignition will read its configuration from the userdata and append the SSH keys listed in the instance metadata.
* [Amazon EC2] - Ignition will read its configuration from the instance userdata. SSH keys are handled by coreos-metadata.
* [Microsoft Azure] - Ignition will read its configuration from the custom data provided to the instance. SSH keys are handled by the Azure Linux Agent.
* [VMware] - Use the VMware Guestinfo variables `coreos.config.data` and `coreos.config.data.encoding` to provide the config and its encoding to the virtual machine. Valid encodings are "", "base64", and "gzip+base64".
* [Google Compute Engine] - Ignition will read its configuration from the instance metadata entry named "user-data". SSH keys are handled by coreos-metadata.
Ignition is under active development so expect this list to expand in the coming months.
@@ -15,3 +16,4 @@ Ignition is under active development so expect this list to expand in the coming
[Amazon EC2]: https://github.com/coreos/docs/blob/master/os/booting-on-ec2.md
[Microsoft Azure]: https://github.com/coreos/docs/blob/master/os/booting-on-azure.md
[VMware]: https://github.com/coreos/docs/blob/master/os/booting-on-vmware.md
[Google Compute Engine]: https://github.com/coreos/docs/blob/master/os/booting-on-google-compute-engine.md

481
vendor/github.com/coreos/ignition/gimme.local generated vendored Executable file
View File

@@ -0,0 +1,481 @@
#!/bin/bash
# vim:noexpandtab:ts=2:sw=2:
#
#+ Usage: $(basename $0) [flags] [go-version] [version-prefix]
#+ -
#+ Version: ${GIMME_VERSION}
#+ -
#+ Install go! There are multiple types of installations available, with 'auto' being the default.
#+ If either 'auto' or 'binary' is specified as GIMME_TYPE, gimme will first check for an existing
#+ go installation. This behavior may be disabled by providing '-f/--force/force' as first positional
#+ argument.
#+ -
#+ Option flags:
#+ -h --help help - show this help text and exit
#+ -V --version version - show the version only and exit
#+ -f --force force - remove the existing go installation if present prior to install
#+ -l --list list - list installed go versions and exit
#+ -
#+ Influential env vars:
#+ -
#+ GIMME_GO_VERSION - version to install (*REQUIRED*, may be given as first positional arg)
#+ GIMME_VERSION_PREFIX - prefix for installed versions (default '${GIMME_VERSION_PREFIX}',
#+ may be given as second positional arg)
#+ GIMME_ARCH - arch to install (default '${GIMME_ARCH}')
#+ GIMME_BINARY_OSX - darwin-specific binary suffix (default '${GIMME_BINARY_OSX}')
#+ GIMME_ENV_PREFIX - prefix for env files (default '${GIMME_ENV_PREFIX}')
#+ GIMME_GO_GIT_REMOTE - git remote for git-based install (default '${GIMME_GO_GIT_REMOTE}')
#+ GIMME_OS - os to install (default '${GIMME_OS}')
#+ GIMME_TMP - temp directory (default '${GIMME_TMP}')
#+ GIMME_TYPE - install type to perform ('auto', 'binary', 'source', or 'git')
#+ (default '${GIMME_TYPE}')
#+ GIMME_DEBUG - enable tracing if non-empty
#+ GIMME_NO_ENV_ALIAS - disable creation of env 'alias' file when os and arch match host
#+ GIMME_SILENT_ENV - omit the 'go version' line from env file
#+ GIMME_CGO_ENABLED - enable build of cgo support
#+ GIMME_CC_FOR_TARGET - cross compiler for cgo support
#+ -
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dan Buch, Tianon Gravi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Abort on the first failing command, let globs that match nothing expand to
# nothing (relied on by _setup_bootstrap / _list_versions), and make a
# pipeline fail if any stage fails.
set -e
shopt -s nullglob
set -o pipefail
# Trace every command when GIMME_DEBUG is set to any non-empty value.
[[ ${GIMME_DEBUG} ]] && set -x
GIMME_VERSION=v0.2.3
# _do_curl "url" "file"
# Download a single URL into file, creating the parent directory first.
# Prefers curl, falls back to wget; aborts the script if neither exists.
_do_curl() {
	local url="${1}" dest="${2}"
	mkdir -p "$(dirname "${dest}")"
	if command -v curl > /dev/null ; then
		curl -sSLf "${url}" -o "${dest}" 2>/dev/null
		return
	fi
	if command -v wget > /dev/null ; then
		wget -q "${url}" -O "${dest}" 2>/dev/null
		return
	fi
	echo >&2 'error: no curl or wget found'
	exit 1
}
# _do_curls "file" "url" ["url"...]
# Fetch the first URL that succeeds into file. A pre-existing non-empty file
# short-circuits. On total failure the (possibly partial) file is removed.
_do_curls() {
	f="${1}"
	shift
	if [[ -s "${f}" ]] ; then
		return 0
	fi
	for url in "${@}" ; do
		_do_curl "${url}" "${f}" && return
	done
	rm -f "${f}"
	return 1
}
# _binary "version" "file.tar.gz" "arch"
# Download the official binary tarball for the given go version/arch into
# file, trying candidate URLs in priority order.
_binary() {
	local version=${1}
	local file=${2}
	local arch=${3}
	# urls is a global array (not declared local); newest hosting layout first.
	urls=(
		"https://storage.googleapis.com/golang/go${version}.${GIMME_OS}-${arch}.tar.gz"
		"https://go.googlecode.com/files/go${version}.${GIMME_OS}-${arch}.tar.gz"
		"https://go.googlecode.com/files/go.${version}.${GIMME_OS}-${arch}.tar.gz"
	)
	# Darwin binaries may carry an OS-version suffix (e.g. osx10.8); try that
	# variant first when GIMME_BINARY_OSX is set.
	if [ "${GIMME_OS}" = 'darwin' -a "${GIMME_BINARY_OSX}" ] ; then
		urls=(
			"https://storage.googleapis.com/golang/go${version}.${GIMME_OS}-${arch}-${GIMME_BINARY_OSX}.tar.gz"
			"${urls[@]}"
		)
	fi
	if [ "${arch}" = 'arm' ] ; then
		# attempt "armv6l" vs just "arm" first (since that's what's officially published)
		urls=(
			"https://storage.googleapis.com/golang/go${version}.${GIMME_OS}-${arch}v6l.tar.gz" # go1.6beta2 & go1.6rc1
			"https://storage.googleapis.com/golang/go${version}.${GIMME_OS}-${arch}6.tar.gz" # go1.6beta1
			"${urls[@]}"
		)
	fi
	_do_curls "${file}" "${urls[@]}"
}
# _source "version" "file.src.tar.gz"
# Download the go source tarball for the given version into the given file.
_source() {
	local version="${1}" dest="${2}"
	# urls stays global, mirroring _binary.
	urls=(
		"https://storage.googleapis.com/golang/go${version}.src.tar.gz"
		"https://go.googlecode.com/files/go${version}.src.tar.gz"
		"https://go.googlecode.com/files/go.${version}.src.tar.gz"
	)
	_do_curls "${dest}" "${urls[@]}"
}
# _fetch "dir"
# Ensure dir holds an up-to-date clone of the go git remote: clone it fresh,
# or repoint origin and fetch branches plus tags in an existing checkout.
_fetch() {
	mkdir -p "$(dirname "${1}")"
	if [[ ! -d "${1}/.git" ]] ; then
		git clone -q "${GIMME_GO_GIT_REMOTE}" "${1}"
		return
	fi
	(
		cd "${1}"
		git remote set-url origin "${GIMME_GO_GIT_REMOTE}"
		git fetch -q --all && git fetch -q --tags
	)
}
# _checkout "version" "dir"
# Fetch the go repo into dir and hard-reset the working tree to the requested
# version, trying in order: branch, go-prefixed branch, 'tip' -> master,
# tag, go-prefixed tag. git chatter is discarded.
_checkout() {
	_fetch "${2}"
	( cd "${2}" && {
		git reset -q --hard "origin/${1}" \
		|| git reset -q --hard "origin/go${1}" \
		|| { [ "${1}" = 'tip' ] && git reset -q --hard origin/master ; } \
		|| git reset -q --hard "refs/tags/${1}" \
		|| git reset -q --hard "refs/tags/go${1}"
	} 2>/dev/null )
}
# _extract "file.tar.gz" "dir"
# Unpack a tarball into dir, dropping the tarball's single top-level directory.
_extract() {
	local archive="${1}" dest="${2}"
	mkdir -p "${dest}"
	tar --strip-components 1 -C "${dest}" -xf "${archive}"
}
# _setup_bootstrap
# Locate or install a go toolchain usable as GOROOT_BOOTSTRAP (needed when
# make.bash references GOROOT_BOOTSTRAP -- see _compile). Exports
# GOROOT_BOOTSTRAP on success; returns 1 when nothing works.
_setup_bootstrap() {
	local versions=("1.6" "1.5" "1.4")
	# try existing: source a previously written env file (nullglob makes the
	# pattern vanish when no env files exist) and ask that go for its GOROOT.
	for v in "${versions[@]}" ; do
		for candidate in "${GIMME_ENV_PREFIX}/go${v}"*".env" ; do
			if [ -s "$candidate" ]; then
				export GOROOT_BOOTSTRAP="$(source "${candidate}" 2>/dev/null && go env GOROOT)"
				return 0
			fi
		done
	done
	# try binary: download an official binary release for the *host* arch.
	for v in "${versions[@]}" ; do
		if [ -n "$(_try_binary ${v} "${GIMME_HOSTARCH}")" ]; then
			export GOROOT_BOOTSTRAP="${GIMME_VERSION_PREFIX}/go${v}.${GIMME_OS}.${GIMME_HOSTARCH}"
			return 0
		fi
	done
	echo >&2 "Unable to setup go bootstrap from existing or binary";
	return 1;
}
# _compile "dir"
# Build go from the source tree in dir via make.bash, honoring the GIMME_*
# cross-compile settings. Runs in a subshell so the exports don't leak into
# the caller's environment.
_compile() {
	(
		# Source trees whose make.bash mentions GOROOT_BOOTSTRAP need an
		# existing toolchain to build with; arrange one first.
		if grep -q GOROOT_BOOTSTRAP "${1}/src/make.bash" &> /dev/null; then
			_setup_bootstrap || return 1
		fi
		cd "${1}"
		# A git checkout may still contain artifacts from a previous build.
		if [[ -d .git ]] ; then
			git clean -dfx -q
		fi
		cd src
		export GOOS="${GIMME_OS}" GOARCH="${GIMME_ARCH}"
		export CGO_ENABLED="${GIMME_CGO_ENABLED}"
		export CC_FOR_TARGET="${GIMME_CC_FOR_TARGET}"
		# GIMME_DEBUG=2 streams build output to stderr; otherwise capture it
		# in a per-target log file next to the tree.
		if [ "${GIMME_DEBUG}" = "2" ]; then
			./make.bash 1>&2 || return 1
		else
			local make_log="${1}/make.${GOOS}.${GOARCH}.log"
			./make.bash &> $make_log || return 1
		fi
	)
}
# _can_compile "goroot"
# Smoke-test a toolchain: write a trivial program and 'go run' it with the
# given installation's go binary; the exit status reports success.
_can_compile() {
	cat > "${GIMME_TMP}/test.go" <<'EOF'
package main
import "os"
func main() {
	os.Exit(0)
}
EOF
	"${1}/bin/go" run "${GIMME_TMP}/test.go"
}
# _env "dir"
# Validate the toolchain under dir and print shell lines (suitable for
# eval/source) that configure the environment to use it. Returns 1 when dir
# does not contain a runnable go binary.
_env() {
	[ -d "${1}/bin" -a -x "${1}/bin/go" ] || return 1
	# if we try to run a Darwin binary on Linux, we need to fail so 'auto' can fallback to cross-compiling from source
	# automatically
	GOROOT="${1}" "${1}/bin/go" version &> /dev/null || return 1
	# https://twitter.com/davecheney/status/431581286918934528
	# we have to GOROOT sometimes because we use official release binaries in unofficial locations :(
	echo
	# Only emit GOOS/GOARCH overrides when the target differs from this
	# toolchain's host values.
	if [[ "$(GOROOT="${1}" "${1}/bin/go" env GOHOSTOS)" = "${GIMME_OS}" ]] ; then
		echo 'unset GOOS'
	else
		echo 'export GOOS="'"${GIMME_OS}"'"'
	fi
	if [[ "$(GOROOT="${1}" "${1}/bin/go" env GOHOSTARCH)" = "${GIMME_ARCH}" ]] ; then
		echo 'unset GOARCH'
	else
		echo 'export GOARCH="'"${GIMME_ARCH}"'"'
	fi
	if ! _can_compile "${1}" >/dev/null 2>&1 ; then
		# if the compile test fails without GOROOT, then we probably need GOROOT
		echo 'export GOROOT="'"${1}"'"'
	else
		echo 'unset GOROOT'
	fi
	echo 'export PATH="'"${1}/bin"':${PATH}"'
	if [[ -z "${GIMME_SILENT_ENV}" ]] ; then
		echo 'go version >&2'
	fi
	echo
}
# _env_alias "dir" "env-file"
# When the installation's host os/arch match the requested target, copy the
# env file to an arch-less alias name and point latest.env at it. Prints the
# path of the env file the caller should advertise.
_env_alias() {
	if [[ "${GIMME_NO_ENV_ALIAS}" ]] ; then
		echo "${2}"
		return
	fi
	if [[ "$(GOROOT="${1}" "${1}/bin/go" env GOHOSTOS)" = "${GIMME_OS}" && \
		"$(GOROOT="${1}" "${1}/bin/go" env GOHOSTARCH)" = "${GIMME_ARCH}" ]] ; then
		local dest="${GIMME_ENV_PREFIX}/go${GIMME_GO_VERSION}.env"
		cp "${2}" "${dest}"
		ln -sf "${dest}" "${GIMME_ENV_PREFIX}/latest.env"
		echo "${dest}"
	else
		echo "${2}"
	fi
}
# _try_existing
# Reuse an already-installed version: if both the install dir's go binary and
# its env file exist, print the env file and succeed; otherwise return 1.
_try_existing() {
	local ver_dir="${GIMME_VERSION_PREFIX}/go${GIMME_GO_VERSION}.${GIMME_OS}.${GIMME_ARCH}"
	local env_file="${GIMME_ENV_PREFIX}/go${GIMME_GO_VERSION}.${GIMME_OS}.${GIMME_ARCH}.env"
	[[ -x "${ver_dir}/bin/go" && -s "${env_file}" ]] || return 1
	cat "${env_file}"
}
# _try_binary "version" "arch"
# Download and unpack an official binary release, write its env file (tee
# also echoes it), and emit a GIMME_ENV export line. Any failed step aborts
# with status 1 so the caller's || chain can fall through.
_try_binary() {
	local version=${1}
	local arch=${2}
	local bin_tgz="${GIMME_TMP}/go${version}.${GIMME_OS}.${arch}.tar.gz"
	local bin_dir="${GIMME_VERSION_PREFIX}/go${version}.${GIMME_OS}.${arch}"
	local bin_env="${GIMME_ENV_PREFIX}/go${version}.${GIMME_OS}.${arch}.env"
	_binary "${version}" "${bin_tgz}" "${arch}" || return 1
	_extract "${bin_tgz}" "${bin_dir}" || return 1
	_env "${bin_dir}" | tee "${bin_env}" || return 1
	echo "export GIMME_ENV=\"$(_env_alias "${bin_dir}" "${bin_env}")\""
}
# _try_source
# Download the source tarball for GIMME_GO_VERSION, build it, write the env
# file, and emit the env plus a GIMME_ENV export on stdout.
_try_source() {
	local src_tgz="${GIMME_TMP}/go${GIMME_GO_VERSION}.src.tar.gz"
	local src_dir="${GIMME_VERSION_PREFIX}/go${GIMME_GO_VERSION}.src"
	local src_env="${GIMME_ENV_PREFIX}/go${GIMME_GO_VERSION}.${GIMME_OS}.${GIMME_ARCH}.env"
	_source "${GIMME_GO_VERSION}" "${src_tgz}" || return 1
	_extract "${src_tgz}" "${src_dir}" || return 1
	_compile "${src_dir}" || return 1
	_env "${src_dir}" | tee "${src_env}" || return 1
	echo "export GIMME_ENV=\"$(_env_alias "${src_dir}" "${src_env}")\""
}
# _try_git
# Check out GIMME_GO_VERSION from the go git remote, build it, write the env
# file, and emit the env plus a GIMME_ENV export on stdout.
_try_git() {
	local git_dir="${GIMME_VERSION_PREFIX}/go"
	local git_env="${GIMME_ENV_PREFIX}/go.git.${GIMME_OS}.${GIMME_ARCH}.env"
	_checkout "${GIMME_GO_VERSION}" "${git_dir}" || return 1
	_compile "${git_dir}" || return 1
	_env "${git_dir}" | tee "${git_env}" || return 1
	echo "export GIMME_ENV=\"$(_env_alias "${git_dir}" "${git_env}")\""
}
# _wipe_version "version"
# Remove an installed version: delete the GOROOT recorded in its env file,
# then the env file itself. A missing/empty env file is a no-op.
_wipe_version() {
	local env_file="${GIMME_ENV_PREFIX}/go${1}.${GIMME_OS}.${GIMME_ARCH}.env"
	[[ -s "${env_file}" ]] || return 0
	rm -rf "$(awk -F\" '/GOROOT/ { print $2 }' "${env_file}")"
	rm -f "${env_file}"
}
# _list_versions
# Print each installed go version for this OS, one per line; the version
# matching 'go env GOROOT' gets a ' <= current' marker written to stderr so
# stdout stays machine-readable.
_list_versions() {
	if [ ! -d "${GIMME_VERSION_PREFIX}" ] ; then
		return 0
	fi
	# Reduce a .../goX.Y.<os>.<arch> path to the bare X.Y version string.
	local current_version="$(go env GOROOT 2>/dev/null)"
	current_version="${current_version##*/go}"
	current_version="${current_version%%.${GIMME_OS}.*}"
	for d in "${GIMME_VERSION_PREFIX}/go"*".${GIMME_OS}."* ; do
		local cleaned="${d##*/go}"
		cleaned="${cleaned%%.${GIMME_OS}.*}"
		echo -en "${cleaned}"
		if [[ $cleaned = $current_version ]] ; then
			echo -en >&2 ' <= current'
		fi
		echo
	done
}
# _realpath "path"
# Print an absolute path: cd into a directory and pwd, or resolve a file's
# parent directory and re-append its basename.
_realpath() {
	if [ -d "$1" ] ; then
		echo "$(cd "$1" && pwd)"
	else
		echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
	fi
}
# _assert_version_given
# Abort the script with usage hints on stderr when GIMME_GO_VERSION is unset.
_assert_version_given() {
	if [[ -n "${GIMME_GO_VERSION}" ]] ; then
		return
	fi
	echo >&2 'error: no GIMME_GO_VERSION supplied'
	echo >&2 " ex: GIMME_GO_VERSION=1.4.1 ${0} ${@}"
	echo >&2 " ex: ${0} 1.4.1 ${@}"
	exit 1
}
# ---- defaults; every setting is overridable from the environment ----
: ${GIMME_OS:=$(uname -s | tr '[:upper:]' '[:lower:]')}
: ${GIMME_HOSTOS:=$(uname -s | tr '[:upper:]' '[:lower:]')}
: ${GIMME_ARCH:=$(uname -m)}
: ${GIMME_HOSTARCH:=$(uname -m)}
: ${GIMME_ENV_PREFIX:=${HOME}/.gimme/envs}
: ${GIMME_VERSION_PREFIX:=${HOME}/.gimme/versions}
: ${GIMME_TMP:=${TMPDIR:-/tmp}/gimme}
: ${GIMME_GO_GIT_REMOTE:=https://github.com/golang/go.git}
: ${GIMME_TYPE:=auto} # 'auto', 'binary', 'source', or 'git'
: ${GIMME_BINARY_OSX:=osx10.8}
# ---- flag parsing: consume leading option words, stop at first positional ----
while [[ $# -gt 0 ]]; do
	case "${1}" in
	-h|--help|help|wat)
		# Help text is extracted from the '#+ ' comment lines at the top of
		# this script; each line is eval'd so ${VAR} references print their
		# current values.
		_old_ifs="$IFS"
		IFS=';'
		awk '/^#\+ / {
			sub(/^#\+ /, "", $0) ;
			sub(/-$/, "", $0) ;
			print $0
		}' "$0" | while read line ; do
			eval "echo \"$line\""
		done
		IFS="$_old_ifs"
		exit 0
		;;
	-V|--version|version)
		echo "${GIMME_VERSION}"
		exit 0
		;;
	-l|--list|list)
		_list_versions
		exit 0
		;;
	-f|--force|force)
		force=1
		;;
	*)
		break
		;;
	esac
	shift
done
# Positional arguments: [go-version] [version-prefix]
if [[ -n "${1}" ]] ; then
	GIMME_GO_VERSION="${1}"
fi
if [[ -n "${2}" ]] ; then
	GIMME_VERSION_PREFIX="${2}"
fi
# Normalize the *target* arch to go's naming (amd64/386/arm/arm64).
case "${GIMME_ARCH}" in
x86_64) GIMME_ARCH=amd64 ;;
x86) GIMME_ARCH=386 ;;
arm64)
	if [[ "${GIMME_GO_VERSION}" < "1.5" ]]; then
		echo >&2 "error: ${GIMME_ARCH} is not supported by this go version"
		echo >&2 "try go1.5 or newer"
		exit 1
	fi
	# Cross-building arm64 on a non-arm64 linux host needs a cross gcc.
	if [[ "${GIMME_HOSTOS}" = "linux" && "${GIMME_HOSTARCH}" != "${GIMME_ARCH}" ]]; then
		: ${GIMME_CC_FOR_TARGET:="aarch64-linux-gnu-gcc"}
	fi
	;;
arm*) GIMME_ARCH=arm ;;
esac
# Normalize the *host* arch the same way.
case "${GIMME_HOSTARCH}" in
x86_64) GIMME_HOSTARCH=amd64 ;;
x86) GIMME_HOSTARCH=386 ;;
arm64) ;;
arm*) GIMME_HOSTARCH=arm ;;
esac
_assert_version_given "$@"
# -f/--force: wipe any existing install of this version before proceeding.
[ ${force} ] && _wipe_version "${GIMME_GO_VERSION}"
# Start from a clean go-related environment.
unset GOARCH
unset GOBIN
unset GOOS
unset GOPATH
unset GOROOT
unset CGO_ENABLED
unset CC_FOR_TARGET
mkdir -p "${GIMME_VERSION_PREFIX}" "${GIMME_ENV_PREFIX}"
GIMME_VERSION_PREFIX="$(_realpath "${GIMME_VERSION_PREFIX}")"
GIMME_ENV_PREFIX="$(_realpath "${GIMME_ENV_PREFIX}")"
# Dispatch on GIMME_TYPE; each _try_* prints the env on stdout on success,
# so failure of the whole chain is reported via the negated case status.
if ! case "${GIMME_TYPE}" in
	binary) _try_existing || _try_binary "${GIMME_GO_VERSION}" "${GIMME_ARCH}" ;;
	source) _try_source || _try_git ;;
	git) _try_git ;;
	auto) _try_existing || _try_binary "${GIMME_GO_VERSION}" "${GIMME_ARCH}" || _try_source || _try_git ;;
	*)
		echo >&2 "I don't know how to '${GIMME_TYPE}'."
		echo >&2 " Try 'auto', 'binary', 'source', or 'git'."
		exit 1
		;;
	esac ; then
	echo >&2 "I don't have any idea what to do with '${GIMME_GO_VERSION}'."
	echo >&2 " (using type '${GIMME_TYPE}')"
	exit 1
fi

View File

@@ -16,9 +16,8 @@ package exec
import (
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/coreos/ignition/config"
@@ -34,18 +33,13 @@ const (
DefaultOnlineTimeout = time.Minute
)
var (
ErrSchemeUnsupported = errors.New("unsupported url scheme")
ErrNetworkFailure = errors.New("network failure")
)
var (
baseConfig = types.Config{
Ignition: types.Ignition{Version: types.IgnitionVersion(types.MaxVersion)},
Storage: types.Storage{
Filesystems: []types.Filesystem{{
Name: "root",
Path: "/sysroot",
Path: func(p types.Path) *types.Path { return &p }("/sysroot"),
}},
},
}
@@ -53,12 +47,13 @@ var (
// Engine represents the entity that fetches and executes a configuration.
type Engine struct {
ConfigCache string
OnlineTimeout time.Duration
Logger *log.Logger
Root string
Provider providers.Provider
OemConfig types.Config
ConfigCache string
OnlineTimeout time.Duration
Logger *log.Logger
Root string
Provider providers.Provider
OemBaseConfig types.Config
DefaultUserConfig types.Config
}
// Run executes the stage of the given name. It returns true if the stage
@@ -66,17 +61,18 @@ type Engine struct {
func (e Engine) Run(stageName string) bool {
cfg, err := e.acquireConfig()
switch err {
case config.ErrEmpty, nil:
e.Logger.PushPrefix(stageName)
defer e.Logger.PopPrefix()
return stages.Get(stageName).Create(e.Logger, e.Root).Run(config.Append(config.Append(baseConfig, e.OemConfig), cfg))
case config.ErrCloudConfig, config.ErrScript:
e.Logger.Info("%v: ignoring and exiting...", err)
return true
case nil:
case config.ErrCloudConfig, config.ErrScript, config.ErrEmpty:
e.Logger.Info("%v: ignoring user-provided config", err)
cfg = e.DefaultUserConfig
default:
e.Logger.Crit("failed to acquire config: %v", err)
return false
}
e.Logger.PushPrefix(stageName)
defer e.Logger.PopPrefix()
return stages.Get(stageName).Create(e.Logger, e.Root).Run(config.Append(baseConfig, config.Append(e.OemBaseConfig, cfg)))
}
// acquireConfig returns the configuration, first checking a local cache
@@ -159,16 +155,9 @@ func (e Engine) renderConfig(cfg types.Config) (types.Config, error) {
// fetchReferencedConfig fetches, renders, and attempts to verify the requested
// config.
func (e Engine) fetchReferencedConfig(cfgRef types.ConfigReference) (types.Config, error) {
var rawCfg []byte
switch cfgRef.Source.Scheme {
case "http":
rawCfg = util.NewHttpClient(e.Logger).
FetchConfig(cfgRef.Source.String(), http.StatusOK, http.StatusNoContent)
if rawCfg == nil {
return types.Config{}, ErrNetworkFailure
}
default:
return types.Config{}, ErrSchemeUnsupported
rawCfg, err := util.FetchResource(e.Logger, url.URL(cfgRef.Source))
if err != nil {
return types.Config{}, err
}
if err := util.AssertValid(cfgRef.Verification, rawCfg); err != nil {

View File

@@ -235,6 +235,7 @@ func (s stage) createFilesystem(fs types.FilesystemMount) error {
}
case "ext4":
mkfs = "/sbin/mkfs.ext4"
args = append(args, "-p")
if fs.Create.Force {
args = append(args, "-F")
}

View File

@@ -66,13 +66,13 @@ func (s stage) Run(config types.Config) bool {
return false
}
if err := s.createUnits(config); err != nil {
s.Logger.Crit("failed to create units: %v", err)
if err := s.createFilesystemsFiles(config); err != nil {
s.Logger.Crit("failed to create files: %v", err)
return false
}
if err := s.createFilesystemsFiles(config); err != nil {
s.Logger.Crit("failed to create files: %v", err)
if err := s.createUnits(config); err != nil {
s.Logger.Crit("failed to create units: %v", err)
return false
}
@@ -133,8 +133,8 @@ func (s stage) createFiles(fs types.Filesystem, files []types.File) error {
s.Logger.PushPrefix("createFiles")
defer s.Logger.PopPrefix()
mnt := string(fs.Path)
if len(mnt) == 0 {
var mnt string
if fs.Path == nil {
var err error
mnt, err = ioutil.TempDir("", "ignition-files")
if err != nil {
@@ -155,6 +155,8 @@ func (s stage) createFiles(fs types.Filesystem, files []types.File) error {
func() error { return syscall.Unmount(mnt, 0) },
"unmounting %q at %q", dev, mnt,
)
} else {
mnt = string(*fs.Path)
}
u := util.Util{

View File

@@ -32,6 +32,9 @@ func TestMapFilesToFilesystems(t *testing.T) {
err error
}
fs1 := types.Path("/fs1")
fs2 := types.Path("/fs2")
tests := []struct {
in in
out out
@@ -53,21 +56,21 @@ func TestMapFilesToFilesystems(t *testing.T) {
},
{
in: in{config: types.Config{Storage: types.Storage{
Filesystems: []types.Filesystem{{Name: "fs1", Path: "/fs1"}, {Name: "fs2", Path: "/fs2"}},
Filesystems: []types.Filesystem{{Name: "fs1", Path: &fs1}, {Name: "fs2", Path: &fs2}},
Files: []types.File{{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs2", Path: "/bar"}},
}}},
out: out{files: map[types.Filesystem][]types.File{
types.Filesystem{Name: "fs1", Path: "/fs1"}: {{Filesystem: "fs1", Path: "/foo"}},
types.Filesystem{Name: "fs2", Path: "/fs2"}: {{Filesystem: "fs2", Path: "/bar"}},
types.Filesystem{Name: "fs1", Path: &fs1}: {{Filesystem: "fs1", Path: "/foo"}},
types.Filesystem{Name: "fs2", Path: &fs2}: {{Filesystem: "fs2", Path: "/bar"}},
}},
},
{
in: in{config: types.Config{Storage: types.Storage{
Filesystems: []types.Filesystem{{Name: "fs1"}, {Name: "fs1", Path: "/fs1"}},
Filesystems: []types.Filesystem{{Name: "fs1"}, {Name: "fs1", Path: &fs1}},
Files: []types.File{{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs1", Path: "/bar"}},
}}},
out: out{files: map[types.Filesystem][]types.File{
types.Filesystem{Name: "fs1", Path: "/fs1"}: {{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs1", Path: "/bar"}},
types.Filesystem{Name: "fs1", Path: &fs1}: {{Filesystem: "fs1", Path: "/foo"}, {Filesystem: "fs1", Path: "/bar"}},
}},
},
}

View File

@@ -17,17 +17,14 @@ package util
import (
"bytes"
"compress/gzip"
"errors"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"github.com/coreos/ignition/config/types"
"github.com/coreos/ignition/internal/log"
"github.com/coreos/ignition/internal/util"
"github.com/vincent-petithory/dataurl"
)
const (
@@ -35,11 +32,6 @@ const (
DefaultFilePermissions os.FileMode = 0644
)
var (
ErrSchemeUnsupported = errors.New("unsupported source scheme")
ErrStatusBad = errors.New("bad HTTP response status")
)
type File struct {
Path types.Path
Contents []byte
@@ -53,7 +45,7 @@ func RenderFile(l *log.Logger, f types.File) *File {
var err error
fetch := func() error {
contents, err = fetchFile(l, f)
contents, err = util.FetchResource(l, url.URL(f.Contents.Source))
return err
}
@@ -85,33 +77,6 @@ func RenderFile(l *log.Logger, f types.File) *File {
}
}
func fetchFile(l *log.Logger, f types.File) ([]byte, error) {
switch f.Contents.Source.Scheme {
case "http":
client := util.NewHttpClient(l)
data, status, err := client.Get(f.Contents.Source.String())
if err != nil {
return nil, err
}
l.Debug("GET result: %s", http.StatusText(status))
if status != http.StatusOK {
return nil, ErrStatusBad
}
return data, nil
case "data":
url, err := dataurl.DecodeString(f.Contents.Source.String())
if err != nil {
return nil, err
}
return url.Data, nil
default:
return nil, ErrSchemeUnsupported
}
}
func decompressFile(l *log.Logger, f types.File, contents []byte) ([]byte, error) {
switch f.Contents.Compression {
case "":

View File

@@ -56,6 +56,9 @@ func (u Util) MaskUnit(unit types.SystemdUnit) error {
if err := mkdirForFile(path); err != nil {
return err
}
if err := os.RemoveAll(path); err != nil {
return err
}
return os.Symlink("/dev/null", path)
}

View File

@@ -124,10 +124,12 @@ func (l *Logger) LogCmd(cmd *exec.Cmd, format string, a ...interface{}) error {
} else {
l.Debug("executing: %v %v", cmd.Path, cmd.Args[1:])
}
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
cmd.Stdout = stdout
cmd.Stderr = stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("%v: Stderr: %q", err, stderr.Bytes())
return fmt.Errorf("%v: Stdout: %q Stderr: %q", err, stdout.Bytes(), stderr.Bytes())
}
return nil
}

View File

@@ -84,12 +84,13 @@ func main() {
oemConfig := oem.MustGet(flags.oem.String())
engine := exec.Engine{
Root: flags.root,
OnlineTimeout: flags.onlineTimeout,
Logger: &logger,
ConfigCache: flags.configCache,
Provider: oemConfig.Provider().Create(&logger),
OemConfig: oemConfig.Config(),
Root: flags.root,
OnlineTimeout: flags.onlineTimeout,
Logger: &logger,
ConfigCache: flags.configCache,
Provider: oemConfig.Provider().Create(&logger),
OemBaseConfig: oemConfig.BaseConfig(),
DefaultUserConfig: oemConfig.DefaultUserConfig(),
}
if !engine.Run(flags.stage.String()) {

View File

@@ -26,14 +26,17 @@ import (
"github.com/coreos/ignition/internal/providers/noop"
"github.com/coreos/ignition/internal/providers/vmware"
"github.com/coreos/ignition/internal/registry"
"github.com/vincent-petithory/dataurl"
)
// Config represents a set of command line flags that map to a particular OEM.
type Config struct {
name string
flags map[string]string
provider providers.ProviderCreator
config types.Config
name string
flags map[string]string
provider providers.ProviderCreator
baseConfig types.Config
defaultUserConfig types.Config
}
func (c Config) Name() string {
@@ -48,8 +51,12 @@ func (c Config) Provider() providers.ProviderCreator {
return c.provider
}
func (c Config) Config() types.Config {
return c.config
func (c Config) BaseConfig() types.Config {
return c.baseConfig
}
func (c Config) DefaultUserConfig() types.Config {
return c.defaultUserConfig
}
var configs = registry.Create("oem configs")
@@ -85,7 +92,7 @@ func init() {
flags: map[string]string{
"online-timeout": "0",
},
config: types.Config{
baseConfig: types.Config{
Systemd: types.Systemd{
Units: []types.SystemdUnit{{
Name: "coreos-metadata-sshkeys@.service",
@@ -101,14 +108,44 @@ func init() {
configs.Register(Config{
name: "gce",
provider: gce.Creator{},
config: types.Config{
baseConfig: types.Config{
Systemd: types.Systemd{
Units: []types.SystemdUnit{{
Name: "coreos-metadata-sshkeys@.service",
Enable: true,
}},
Units: []types.SystemdUnit{
{Enable: true, Name: "coreos-metadata-sshkeys@.service"},
{Enable: true, Name: "google-accounts-manager.service"},
{Enable: true, Name: "google-address-manager.service"},
{Enable: true, Name: "google-clock-sync-manager.service"},
{Enable: true, Name: "google-startup-scripts-onboot.service"},
{Enable: true, Name: "google-startup-scripts.service"},
},
},
Storage: types.Storage{
Files: []types.File{
serviceFromOem("google-accounts-manager.service"),
serviceFromOem("google-address-manager.service"),
serviceFromOem("google-clock-sync-manager.service"),
serviceFromOem("google-startup-scripts-onboot.service"),
serviceFromOem("google-startup-scripts.service"),
{
Filesystem: "root",
Path: "/etc/hosts",
Mode: 0444,
Contents: contentsFromString("169.254.169.254 metadata\n127.0.0.1 localhost\n"),
},
{
Filesystem: "root",
Path: "/etc/profile.d/google-cloud-sdk.sh",
Mode: 0444,
Contents: contentsFromString(`#!/bin/sh
alias gcloud="(docker images google/cloud-sdk || docker pull google/cloud-sdk) > /dev/null;docker run -t -i --net="host" -v $HOME/.config:/.config -v /var/run/docker.sock:/var/run/doker.sock google/cloud-sdk gcloud"
alias gcutil="(docker images google/cloud-sdk || docker pull google/cloud-sdk) > /dev/null;docker run -t -i --net="host" -v $HOME/.config:/.config google/cloud-sdk gcutil"
alias gsutil="(docker images google/cloud-sdk || docker pull google/cloud-sdk) > /dev/null;docker run -t -i --net="host" -v $HOME/.config:/.config google/cloud-sdk gsutil"
`),
},
},
},
},
defaultUserConfig: types.Config{Systemd: types.Systemd{Units: []types.SystemdUnit{userCloudInit("GCE", "gce")}}},
})
configs.Register(Config{
name: "hyperv",
@@ -168,3 +205,49 @@ func MustGet(name string) Config {
func Names() (names []string) {
return configs.Names()
}
func contentsFromString(data string) types.FileContents {
return types.FileContents{
Source: types.Url{
Scheme: "data",
Opaque: "," + dataurl.EscapeString(data),
},
}
}
func contentsFromOem(path string) types.FileContents {
return types.FileContents{
Source: types.Url{
Scheme: "oem",
Path: path,
},
}
}
func userCloudInit(name string, oem string) types.SystemdUnit {
contents := `[Unit]
Description=Cloudinit from %s metadata
[Service]
Type=oneshot
ExecStart=/usr/bin/coreos-cloudinit --oem=%s
[Install]
WantedBy=multi-user.target
`
return types.SystemdUnit{
Name: "oem-cloudinit.service",
Enable: true,
Contents: fmt.Sprintf(contents, name, oem),
}
}
func serviceFromOem(unit string) types.File {
return types.File{
Filesystem: "root",
Path: types.Path("/etc/systemd/system/" + unit),
Mode: 0444,
Contents: contentsFromOem("/units/" + unit),
}
}

View File

@@ -18,14 +18,9 @@
package cmdline
import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/coreos/ignition/config"
@@ -33,7 +28,6 @@ import (
"github.com/coreos/ignition/internal/log"
"github.com/coreos/ignition/internal/providers"
putil "github.com/coreos/ignition/internal/providers/util"
"github.com/coreos/ignition/internal/systemd"
"github.com/coreos/ignition/internal/util"
)
@@ -42,9 +36,6 @@ const (
maxBackoff = 30 * time.Second
cmdlinePath = "/proc/cmdline"
cmdlineUrlFlag = "coreos.config.url"
oemDevicePath = "/dev/disk/by-label/OEM" // Device link where oem partition is found.
oemDirPath = "/usr/share/oem" // OEM dir within root fs to consider for pxe scenarios.
oemMountPath = "/mnt/oem" // Mountpoint where oem partition is mounted when present.
)
type Creator struct{}
@@ -132,77 +123,12 @@ func (p *provider) getRawConfig() bool {
return false
}
switch url.Scheme {
case "http":
p.rawConfig = p.client.FetchConfig(p.configUrl, http.StatusOK, http.StatusNoContent)
if p.rawConfig == nil {
return false
}
case "oem":
path := filepath.Clean(url.Path)
if !filepath.IsAbs(path) {
p.logger.Err("oem path is not absolute: %q", url.Path)
return false
}
// check if present under oemDirPath, if so use it.
absPath := filepath.Join(oemDirPath, path)
p.rawConfig, err = ioutil.ReadFile(absPath)
if os.IsNotExist(err) {
p.logger.Info("oem config not found in %q, trying %q",
oemDirPath, oemMountPath)
// try oemMountPath, requires mounting it.
err = p.mountOEM()
if err == nil {
absPath := filepath.Join(oemMountPath, path)
p.rawConfig, err = ioutil.ReadFile(absPath)
p.umountOEM()
}
}
if err != nil {
p.logger.Err("failed to read oem config: %v", err)
return false
}
default:
p.logger.Err("unsupported url scheme: %q", url.Scheme)
data, err := util.FetchResource(p.logger, *url)
if err != nil {
p.logger.Err("failed to fetch %v: %v", url, err)
return false
}
p.rawConfig = data
return true
}
// mountOEM waits for the presence of and mounts the oem partition @ oemMountPath.
func (p *provider) mountOEM() error {
dev := []string{oemDevicePath}
if err := systemd.WaitOnDevices(dev, "oem-cmdline"); err != nil {
p.logger.Err("failed to wait for oem device: %v", err)
return err
}
if err := os.MkdirAll(oemMountPath, 0700); err != nil {
p.logger.Err("failed to create oem mount point: %v", err)
return err
}
if err := p.logger.LogOp(
func() error {
return syscall.Mount(dev[0], oemMountPath, "ext4", 0, "")
},
"mounting %q at %q", oemDevicePath, oemMountPath,
); err != nil {
return fmt.Errorf("failed to mount device %q at %q: %v",
oemDevicePath, oemMountPath, err)
}
return nil
}
// umountOEM unmounts the oem partition @ oemMountPath.
func (p *provider) umountOEM() {
p.logger.LogOp(
func() error { return syscall.Unmount(oemMountPath, 0) },
"unmounting %q", oemMountPath,
)
}

View File

@@ -45,7 +45,7 @@ func Begin(logger *log.Logger, dev string) *Operation {
// CreatePartition adds the supplied partition to the list of partitions to be created as part of an operation.
func (op *Operation) CreatePartition(p Partition) {
// XXX(vc): no checking is performed here, since we perform checking at yaml/json parsing, Commit() will just fail on badness.
// XXX(vc): no checking is performed here, since we perform checking at json parsing, Commit() will just fail on badness.
op.parts = append(op.parts, p)
}

142
vendor/github.com/coreos/ignition/internal/util/url.go generated vendored Normal file
View File

@@ -0,0 +1,142 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"syscall"
"github.com/coreos/ignition/internal/log"
"github.com/coreos/ignition/internal/systemd"
"github.com/vincent-petithory/dataurl"
)
var (
	// ErrSchemeUnsupported is returned for URL schemes other than
	// http(s), data, and oem.
	ErrSchemeUnsupported = errors.New("unsupported source scheme")
	// ErrPathNotAbsolute is returned for an oem URL whose path is relative.
	ErrPathNotAbsolute = errors.New("path is not absolute")
	// ErrNotFound is returned for an HTTP 404 response.
	ErrNotFound = errors.New("resource not found")
	// ErrFailed is a generic fetch failure (unexpected HTTP status, failed
	// OEM mount, or unreadable oem file).
	ErrFailed = errors.New("failed to fetch resource")
)

const (
	oemDevicePath = "/dev/disk/by-label/OEM" // Device link where oem partition is found.
	oemDirPath    = "/usr/share/oem"         // OEM dir within root fs to consider for pxe scenarios.
	oemMountPath  = "/mnt/oem"               // Mountpoint where oem partition is mounted when present.
)
// FetchResource fetches a resource given a URL. The supported schemes are
// http, https, data, and oem.
//
// For oem URLs the path is first looked up under oemDirPath; if absent there,
// the OEM partition is mounted at oemMountPath, read, and unmounted again.
func FetchResource(l *log.Logger, u url.URL) ([]byte, error) {
	switch u.Scheme {
	case "http", "https":
		client := NewHttpClient(l)
		data, status, err := client.Get(u.String())
		if err != nil {
			return nil, err
		}
		l.Debug("GET result: %s", http.StatusText(status))
		switch status {
		case http.StatusOK, http.StatusNoContent:
			return data, nil
		case http.StatusNotFound:
			return nil, ErrNotFound
		default:
			return nil, ErrFailed
		}
	case "data":
		url, err := dataurl.DecodeString(u.String())
		if err != nil {
			return nil, err
		}
		return url.Data, nil
	case "oem":
		path := filepath.Clean(u.Path)
		if !filepath.IsAbs(path) {
			l.Err("oem path is not absolute: %q", u.Path)
			return nil, ErrPathNotAbsolute
		}

		// check if present under oemDirPath, if so use it.
		data, err := ioutil.ReadFile(filepath.Join(oemDirPath, path))
		if os.IsNotExist(err) {
			l.Info("oem config not found in %q, trying %q",
				oemDirPath, oemMountPath)

			// try oemMountPath, requires mounting it.
			if err := mountOEM(l); err != nil {
				l.Err("failed to mount oem partition: %v", err)
				return nil, ErrFailed
			}
			// Note: deliberately assigns the outer err so the check below
			// also covers this read (previously a shadowed err silently
			// dropped read failures here and returned nil, nil).
			data, err = ioutil.ReadFile(filepath.Join(oemMountPath, path))
			umountOEM(l)
		}
		if err != nil {
			l.Err("failed to read oem config: %v", err)
			return nil, ErrFailed
		}
		return data, nil
	default:
		return nil, ErrSchemeUnsupported
	}
}
// mountOEM waits for the presence of and mounts the oem partition at
// oemMountPath. Any step failing is logged and returned to the caller.
func mountOEM(l *log.Logger) error {
	devices := []string{oemDevicePath}

	// Block until the OEM device link shows up.
	err := systemd.WaitOnDevices(devices, "oem-cmdline")
	if err != nil {
		l.Err("failed to wait for oem device: %v", err)
		return err
	}

	// Ensure the mount point directory exists before mounting onto it.
	if err = os.MkdirAll(oemMountPath, 0700); err != nil {
		l.Err("failed to create oem mount point: %v", err)
		return err
	}

	doMount := func() error {
		return syscall.Mount(devices[0], oemMountPath, "ext4", 0, "")
	}
	if err = l.LogOp(doMount, "mounting %q at %q", oemDevicePath, oemMountPath); err != nil {
		return fmt.Errorf("failed to mount device %q at %q: %v",
			oemDevicePath, oemMountPath, err)
	}
	return nil
}
// umountOEM unmounts the oem partition at oemMountPath. This is best
// effort: a failure is logged by LogOp and otherwise ignored.
func umountOEM(l *log.Logger) {
	unmount := func() error {
		return syscall.Unmount(oemMountPath, 0)
	}
	l.LogOp(unmount, "unmounting %q", oemMountPath)
}

9
vendor/github.com/go-yaml/yaml/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,9 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip
go_import_path: gopkg.in/yaml.v2

188
vendor/github.com/go-yaml/yaml/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,188 @@
Copyright (c) 2011-2014 - Canonical Inc.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.
Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

31
vendor/github.com/go-yaml/yaml/LICENSE.libyaml generated vendored Normal file
View File

@@ -0,0 +1,31 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

131
vendor/github.com/go-yaml/yaml/README.md generated vendored Normal file
View File

@@ -0,0 +1,131 @@
# YAML support for the Go language
Introduction
------------
The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
The import path for the package is *gopkg.in/yaml.v2*.
To install it, run:
go get gopkg.in/yaml.v2
API documentation
-----------------
If opened in a browser, the import path itself leads to the API documentation:
* [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
API stability
-------------
The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
License
-------
The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
Example
-------
```Go
package main
import (
"fmt"
"log"
"gopkg.in/yaml.v2"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```

742
vendor/github.com/go-yaml/yaml/apic.go generated vendored Normal file
View File

@@ -0,0 +1,742 @@
package yaml
import (
"io"
"os"
)
// yaml_insert_token inserts token into the parser's token queue at position
// pos (relative to tokens_head); a negative pos appends at the tail.
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))

	// Check if we can move the queue at the beginning of the buffer:
	// when the slice is full, compact already-consumed head entries away
	// so the append below can reuse the existing capacity.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}
	// Grow the queue by one; for pos < 0 this append IS the insertion.
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	// Shift the tail right by one slot and drop the token in place.
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object. Always reports success; the boolean return
// mirrors libyaml's C API.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
	*parser = yaml_parser_t{
		raw_buffer: make([]byte, 0, input_raw_buffer_size),
		buffer:     make([]byte, 0, input_buffer_size),
	}
	return true
}

// Destroy a parser object by resetting it to its zero value, dropping
// references to its buffers.
func yaml_parser_delete(parser *yaml_parser_t) {
	*parser = yaml_parser_t{}
}

// String read handler: copies from the in-memory input into buffer and
// reports io.EOF once the whole input has been consumed.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	if parser.input_pos == len(parser.input) {
		return 0, io.EOF
	}
	n = copy(buffer, parser.input[parser.input_pos:])
	parser.input_pos += n
	return n, nil
}

// File read handler: delegates directly to the underlying file's Read.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_file.Read(buffer)
}

// Set a string input. The input source may only be set once per parser.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_string_read_handler
	parser.input = input
	parser.input_pos = 0
}

// Set a file input. The input source may only be set once per parser.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_file_read_handler
	parser.input_file = file
}

// Set the source encoding. May only be set once per parser.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
	if parser.encoding != yaml_ANY_ENCODING {
		panic("must set the encoding only once")
	}
	parser.encoding = encoding
}
// Create a new emitter object. Always reports success; the boolean return
// mirrors libyaml's C API.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
	*emitter = yaml_emitter_t{
		buffer:     make([]byte, output_buffer_size),
		raw_buffer: make([]byte, 0, output_raw_buffer_size),
		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
		events:     make([]yaml_event_t, 0, initial_queue_size),
	}
	return true
}

// Destroy an emitter object by resetting it to its zero value.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{}
}

// String write handler: appends the emitted bytes to the output slice.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
	return nil
}

// File write handler: forwards the emitted bytes to the underlying writer.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_file.Write(buffer)
	return err
}

// Set a string output. The output target may only be set once per emitter.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
	if emitter.write_handler != nil {
		panic("must set the output target only once")
	}
	emitter.write_handler = yaml_string_write_handler
	emitter.output_buffer = output_buffer
}

// Set a file output. The output target may only be set once per emitter.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
	if emitter.write_handler != nil {
		panic("must set the output target only once")
	}
	emitter.write_handler = yaml_file_write_handler
	emitter.output_file = file
}

// Set the output encoding. May only be set once per emitter.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
	if emitter.encoding != yaml_ANY_ENCODING {
		panic("must set the output encoding only once")
	}
	emitter.encoding = encoding
}

// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
	emitter.canonical = canonical
}

// Set the indentation increment; values outside [2, 9] fall back to 2.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
	if indent < 2 || indent > 9 {
		indent = 2
	}
	emitter.best_indent = indent
}

// Set the preferred line width; negative inputs are normalized to -1.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
	if width < 0 {
		width = -1
	}
	emitter.best_width = width
}

// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
	emitter.unicode = unicode
}

// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
	emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Event constructors. Each one overwrites *event with a fresh value of the
// given type and always reports success (the return mirrors libyaml's C API).

// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
	*event = yaml_event_t{
		typ:      yaml_STREAM_START_EVENT,
		encoding: encoding,
	}
	return true
}

// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
	*event = yaml_event_t{
		typ: yaml_STREAM_END_EVENT,
	}
	return true
}

// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
	tag_directives []yaml_tag_directive_t, implicit bool) bool {
	*event = yaml_event_t{
		typ:               yaml_DOCUMENT_START_EVENT,
		version_directive: version_directive,
		tag_directives:    tag_directives,
		implicit:          implicit,
	}
	return true
}

// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
	*event = yaml_event_t{
		typ:      yaml_DOCUMENT_END_EVENT,
		implicit: implicit,
	}
	return true
}
///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}
// Create SCALAR. Note that plain_implicit is stored in the shared implicit
// field while quoted_implicit keeps its own field.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
	*event = yaml_event_t{
		typ:             yaml_SCALAR_EVENT,
		anchor:          anchor,
		tag:             tag,
		value:           value,
		implicit:        plain_implicit,
		quoted_implicit: quoted_implicit,
		style:           yaml_style_t(style),
	}
	return true
}

// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
	*event = yaml_event_t{
		typ:      yaml_SEQUENCE_START_EVENT,
		anchor:   anchor,
		tag:      tag,
		implicit: implicit,
		style:    yaml_style_t(style),
	}
	return true
}

// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
	*event = yaml_event_t{
		typ: yaml_SEQUENCE_END_EVENT,
	}
	return true
}

// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
	*event = yaml_event_t{
		typ:      yaml_MAPPING_START_EVENT,
		anchor:   anchor,
		tag:      tag,
		implicit: implicit,
		style:    yaml_style_t(style),
	}
	return true
}

// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
	*event = yaml_event_t{
		typ: yaml_MAPPING_END_EVENT,
	}
	return true
}

// Destroy an event object by resetting it to its zero value.
func yaml_event_delete(event *yaml_event_t) {
	*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compliler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//

683
vendor/github.com/go-yaml/yaml/decode.go generated vendored Normal file
View File

@@ -0,0 +1,683 @@
package yaml
import (
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
"strconv"
"time"
)
// Node kinds produced by the parser. Starting at 1<<0 keeps the zero
// value of node.kind distinct from every valid kind.
const (
	documentNode = 1 << iota
	mappingNode
	sequenceNode
	scalarNode
	aliasNode
)

// node is one vertex of the intermediate tree built from the libyaml
// event stream before values are unmarshalled into Go types.
type node struct {
	kind         int              // one of the *Node constants above
	line, column int              // 0-based position of the event in the input
	tag          string           // explicit or resolved YAML tag, if any
	value        string           // scalar text, or the anchor name for aliases
	implicit     bool             // tag was inferred rather than written in the input
	children     []*node          // sequence items, or alternating map key/value pairs
	anchors      map[string]*node // document nodes only: anchor name -> anchored node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.

// parser couples the low-level libyaml-port state with the most
// recently read event and the document node currently being built.
type parser struct {
	parser yaml_parser_t
	event  yaml_event_t
	doc    *node
}
// newParser returns a parser reading from b. Empty input is replaced
// with a single newline so the underlying parser still produces a
// valid (empty) stream. The leading STREAM-START event is consumed,
// leaving the parser positioned on the first content event.
func newParser(b []byte) *parser {
	p := parser{}
	if !yaml_parser_initialize(&p.parser) {
		// Message fixed: it is the parser being initialized here, not
		// the emitter.
		panic("failed to initialize YAML parser")
	}
	if len(b) == 0 {
		b = []byte{'\n'}
	}
	yaml_parser_set_input_string(&p.parser, b)

	p.skip()
	if p.event.typ != yaml_STREAM_START_EVENT {
		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
	}
	p.skip()
	return &p
}
// destroy releases the pending event, if any, and the parser state.
func (p *parser) destroy() {
	if p.event.typ != yaml_NO_EVENT {
		yaml_event_delete(&p.event)
	}
	yaml_parser_delete(&p.parser)
}
// skip discards the current event and reads the next one into p.event.
// Attempting to move past STREAM-END aborts: the caller consumed more
// events than the stream contains, which indicates corrupted input.
func (p *parser) skip() {
	if p.event.typ != yaml_NO_EVENT {
		if p.event.typ == yaml_STREAM_END_EVENT {
			failf("attempted to go past the end of stream; corrupted value?")
		}
		yaml_event_delete(&p.event)
	}
	if !yaml_parser_parse(&p.parser, &p.event) {
		p.fail()
	}
}
// fail reports the parser's stored problem via failf, prefixed with
// the best line number available (the problem mark, falling back to
// the context mark; 0 means no position is known).
func (p *parser) fail() {
	var where string
	var line int
	if p.parser.problem_mark.line != 0 {
		line = p.parser.problem_mark.line
	} else if p.parser.context_mark.line != 0 {
		line = p.parser.context_mark.line
	}
	if line != 0 {
		where = "line " + strconv.Itoa(line) + ": "
	}
	var msg string
	if len(p.parser.problem) > 0 {
		msg = p.parser.problem
	} else {
		msg = "unknown problem parsing YAML content"
	}
	failf("%s%s", where, msg)
}
// anchor registers n under the given anchor name on the current
// document so later aliases can resolve to it. A nil anchor is a no-op.
func (p *parser) anchor(n *node, anchor []byte) {
	if anchor != nil {
		p.doc.anchors[string(anchor)] = n
	}
}
// parse builds and returns the node for the current event, dispatching
// on the event type. It returns nil at STREAM-END (empty input).
func (p *parser) parse() *node {
	switch p.event.typ {
	case yaml_SCALAR_EVENT:
		return p.scalar()
	case yaml_ALIAS_EVENT:
		return p.alias()
	case yaml_MAPPING_START_EVENT:
		return p.mapping()
	case yaml_SEQUENCE_START_EVENT:
		return p.sequence()
	case yaml_DOCUMENT_START_EVENT:
		return p.document()
	case yaml_STREAM_END_EVENT:
		// Happens when attempting to decode an empty buffer.
		return nil
	default:
		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
	}
	// Dead code: every case above returns or panics. Kept for old
	// compilers that required a terminating statement here.
	panic("unreachable")
}
// node allocates a node of the given kind positioned at the current
// event's start mark.
func (p *parser) node(kind int) *node {
	return &node{
		kind:   kind,
		line:   p.event.start_mark.line,
		column: p.event.start_mark.column,
	}
}
// document consumes DOCUMENT-START .. DOCUMENT-END and returns a
// document node with the root as its single child. A fresh anchor
// table is installed so anchors do not leak across documents.
func (p *parser) document() *node {
	n := p.node(documentNode)
	n.anchors = make(map[string]*node)
	p.doc = n
	p.skip()
	n.children = append(n.children, p.parse())
	if p.event.typ != yaml_DOCUMENT_END_EVENT {
		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
	}
	p.skip()
	return n
}
// alias returns an alias node whose value is the referenced anchor name.
func (p *parser) alias() *node {
	n := p.node(aliasNode)
	n.value = string(p.event.anchor)
	p.skip()
	return n
}
// scalar returns a scalar node carrying the event's text, tag and
// implicit flag, registering any anchor set on the event.
func (p *parser) scalar() *node {
	n := p.node(scalarNode)
	n.value = string(p.event.value)
	n.tag = string(p.event.tag)
	n.implicit = p.event.implicit
	p.anchor(n, p.event.anchor)
	p.skip()
	return n
}
// sequence consumes SEQUENCE-START .. SEQUENCE-END, parsing each item
// into a child node.
func (p *parser) sequence() *node {
	n := p.node(sequenceNode)
	p.anchor(n, p.event.anchor)
	p.skip()
	for p.event.typ != yaml_SEQUENCE_END_EVENT {
		n.children = append(n.children, p.parse())
	}
	p.skip()
	return n
}
// mapping consumes MAPPING-START .. MAPPING-END. Children are stored
// flattened as alternating key, value pairs.
func (p *parser) mapping() *node {
	n := p.node(mappingNode)
	p.anchor(n, p.event.anchor)
	p.skip()
	for p.event.typ != yaml_MAPPING_END_EVENT {
		n.children = append(n.children, p.parse(), p.parse())
	}
	p.skip()
	return n
}
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.

// decoder walks a node tree and fills in caller-provided Go values.
type decoder struct {
	doc     *node           // current document; used to resolve alias nodes
	aliases map[string]bool // anchors currently being expanded (cycle detection)
	mapType reflect.Type    // map type to materialize for untyped mappings
	terrors []string        // accumulated type-mismatch messages
}
// Reflection singletons used throughout decoding.
var (
	mapItemType    = reflect.TypeOf(MapItem{})
	durationType   = reflect.TypeOf(time.Duration(0))
	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
	ifaceType      = defaultMapType.Elem() // reflect.Type of interface{}
)
// newDecoder returns a decoder ready for use: untyped mappings default
// to map[interface{}]interface{} and the alias-expansion set is empty.
func newDecoder() *decoder {
	return &decoder{
		mapType: defaultMapType,
		aliases: make(map[string]bool),
	}
}
// terror records a type error: node n cannot be unmarshalled into out.
// The node's own tag wins over the caller-supplied fallback, and for
// scalar-ish tags a short excerpt of the offending value is included.
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
	if n.tag != "" {
		tag = n.tag
	}
	value := n.value
	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
		if len(value) > 10 {
			value = " `" + value[:7] + "...`"
		} else {
			value = " `" + value + "`"
		}
	}
	// n.line is 0-based; report 1-based to the user.
	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}
// callUnmarshaler hands node n to a custom Unmarshaler. The closure it
// passes decodes n into whatever target the user supplies; type errors
// accumulated during that inner decode are removed from d.terrors and
// returned to the user as a *TypeError, so the user may retry with a
// different target type. A *TypeError returned by the user is merged
// back into d.terrors; any other error aborts via fail.
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
	terrlen := len(d.terrors)
	err := u.UnmarshalYAML(func(v interface{}) (err error) {
		defer handleErr(&err)
		d.unmarshal(n, reflect.ValueOf(v))
		if len(d.terrors) > terrlen {
			issues := d.terrors[terrlen:]
			d.terrors = d.terrors[:terrlen]
			return &TypeError{issues}
		}
		return nil
	})
	if e, ok := err.(*TypeError); ok {
		d.terrors = append(d.terrors, e.Errors...)
		return false
	}
	if err != nil {
		fail(err)
	}
	return true
}
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
		return out, false, false
	}
	again := true
	for again {
		again = false
		if out.Kind() == reflect.Ptr {
			if out.IsNil() {
				out.Set(reflect.New(out.Type().Elem()))
			}
			out = out.Elem()
			// Loop once more: the pointed-to value may itself be a
			// pointer or an Unmarshaler.
			again = true
		}
		if out.CanAddr() {
			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
				good = d.callUnmarshaler(n, u)
				return out, true, good
			}
		}
	}
	return out, false, false
}
// unmarshal decodes node n into out, dispatching on the node kind.
// Documents and aliases are handled before prepare so that pointer
// allocation and custom unmarshalers apply to the underlying content.
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
	switch n.kind {
	case documentNode:
		return d.document(n, out)
	case aliasNode:
		return d.alias(n, out)
	}
	out, unmarshaled, good := d.prepare(n, out)
	if unmarshaled {
		return good
	}
	switch n.kind {
	case scalarNode:
		good = d.scalar(n, out)
	case mappingNode:
		good = d.mapping(n, out)
	case sequenceNode:
		good = d.sequence(n, out)
	default:
		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
	}
	return good
}
// document decodes a document node by unmarshalling its single root
// child into out. A document without exactly one child decodes to
// nothing and reports failure.
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
	if len(n.children) != 1 {
		return false
	}
	d.doc = n
	d.unmarshal(n.children[0], out)
	return true
}
// alias resolves an alias node against the document's anchor table and
// decodes the anchored node into out. d.aliases marks anchors that are
// currently being expanded, catching self-referential anchors.
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
	an, ok := d.doc.anchors[n.value]
	if !ok {
		failf("unknown anchor '%s' referenced", n.value)
	}
	if d.aliases[n.value] {
		failf("anchor '%s' value contains itself", n.value)
	}
	d.aliases[n.value] = true
	good = d.unmarshal(an, out)
	delete(d.aliases, n.value)
	return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
// scalar decodes a scalar node into out. The node text is resolved to
// a typed Go value (string, int/int64/uint64, float64, bool, nil, ...)
// unless an explicit non-specific tag forces plain-string handling;
// !!binary payloads are base64-decoded. A resolved nil zeroes out (or
// empties an unaddressable map). encoding.TextUnmarshaler is honored
// for string values; otherwise the resolved value is converted to
// out's kind with overflow checks, recording a type error when no
// conversion applies.
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
	var tag string
	var resolved interface{}
	if n.tag == "" && !n.implicit {
		tag = yaml_STR_TAG
		resolved = n.value
	} else {
		tag, resolved = resolve(n.tag, n.value)
		if tag == yaml_BINARY_TAG {
			data, err := base64.StdEncoding.DecodeString(resolved.(string))
			if err != nil {
				failf("!!binary value contains invalid base64 data")
			}
			resolved = string(data)
		}
	}
	if resolved == nil {
		if out.Kind() == reflect.Map && !out.CanAddr() {
			// Cannot replace the map value itself; empty it in place.
			resetMap(out)
		} else {
			out.Set(reflect.Zero(out.Type()))
		}
		return true
	}
	if s, ok := resolved.(string); ok && out.CanAddr() {
		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
			err := u.UnmarshalText([]byte(s))
			if err != nil {
				fail(err)
			}
			return true
		}
	}
	switch out.Kind() {
	case reflect.String:
		if tag == yaml_BINARY_TAG {
			out.SetString(resolved.(string))
			good = true
		} else if resolved != nil {
			// Use the raw node text, not the resolved value, so e.g.
			// "true" decodes into a string field exactly as written.
			out.SetString(n.value)
			good = true
		}
	case reflect.Interface:
		if resolved == nil {
			out.Set(reflect.Zero(out.Type()))
		} else {
			out.Set(reflect.ValueOf(resolved))
		}
		good = true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch resolved := resolved.(type) {
		case int:
			if !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				good = true
			}
		case int64:
			if !out.OverflowInt(resolved) {
				out.SetInt(resolved)
				good = true
			}
		case uint64:
			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				good = true
			}
		case float64:
			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				good = true
			}
		case string:
			// Strings only decode into an integer field when the field
			// is a time.Duration, e.g. "3s".
			if out.Type() == durationType {
				d, err := time.ParseDuration(resolved)
				if err == nil {
					out.SetInt(int64(d))
					good = true
				}
			}
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		switch resolved := resolved.(type) {
		case int:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				good = true
			}
		case int64:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				good = true
			}
		case uint64:
			if !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				good = true
			}
		case float64:
			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				good = true
			}
		}
	case reflect.Bool:
		switch resolved := resolved.(type) {
		case bool:
			out.SetBool(resolved)
			good = true
		}
	case reflect.Float32, reflect.Float64:
		switch resolved := resolved.(type) {
		case int:
			out.SetFloat(float64(resolved))
			good = true
		case int64:
			out.SetFloat(float64(resolved))
			good = true
		case uint64:
			out.SetFloat(float64(resolved))
			good = true
		case float64:
			out.SetFloat(resolved)
			good = true
		}
	case reflect.Ptr:
		if out.Type().Elem() == reflect.TypeOf(resolved) {
			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
			elem := reflect.New(out.Type().Elem())
			elem.Elem().Set(reflect.ValueOf(resolved))
			out.Set(elem)
			good = true
		}
	}
	if !good {
		d.terror(n, tag, out)
	}
	return good
}
// settableValueOf returns an addressable, settable reflect.Value
// holding a copy of i.
func settableValueOf(i interface{}) reflect.Value {
	sv := reflect.New(reflect.TypeOf(i)).Elem()
	sv.Set(reflect.ValueOf(i))
	return sv
}
// sequence decodes a sequence node into out, which must be a slice or
// an interface (filled with a []interface{}). Children that fail to
// decode are dropped, so the result may be shorter than the node.
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
	l := len(n.children)
	var iface reflect.Value
	switch out.Kind() {
	case reflect.Slice:
		out.Set(reflect.MakeSlice(out.Type(), l, l))
	case reflect.Interface:
		// No type hints. Will have to use a generic sequence.
		iface = out
		out = settableValueOf(make([]interface{}, l))
	default:
		d.terror(n, yaml_SEQ_TAG, out)
		return false
	}
	et := out.Type().Elem()

	j := 0
	for i := 0; i < l; i++ {
		e := reflect.New(et).Elem()
		if ok := d.unmarshal(n.children[i], e); ok {
			out.Index(j).Set(e)
			j++
		}
	}
	// Trim the slots left over from children that failed to decode.
	out.Set(out.Slice(0, j))
	if iface.IsValid() {
		iface.Set(out)
	}
	return true
}
// mapping decodes a mapping node into out: a struct (by field name), a
// []MapItem (order preserving), a map, or an interface filled with the
// map type currently in effect (d.mapType). "<<" merge keys are
// expanded in place. Map- or slice-valued keys are rejected since they
// are not valid Go map keys.
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
	switch out.Kind() {
	case reflect.Struct:
		return d.mappingStruct(n, out)
	case reflect.Slice:
		return d.mappingSlice(n, out)
	case reflect.Map:
		// okay
	case reflect.Interface:
		if d.mapType.Kind() == reflect.Map {
			iface := out
			out = reflect.MakeMap(d.mapType)
			iface.Set(out)
		} else {
			// d.mapType is MapSlice: decode preserving key order.
			slicev := reflect.New(d.mapType).Elem()
			if !d.mappingSlice(n, slicev) {
				return false
			}
			out.Set(slicev)
			return true
		}
	default:
		d.terror(n, yaml_MAP_TAG, out)
		return false
	}
	outt := out.Type()
	kt := outt.Key()
	et := outt.Elem()

	// While filling a fully generic map, nested untyped mappings
	// inherit the same generic map type; restored before returning.
	mapType := d.mapType
	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
		d.mapType = outt
	}

	if out.IsNil() {
		out.Set(reflect.MakeMap(outt))
	}
	l := len(n.children)
	for i := 0; i < l; i += 2 {
		if isMerge(n.children[i]) {
			d.merge(n.children[i+1], out)
			continue
		}
		k := reflect.New(kt).Elem()
		if d.unmarshal(n.children[i], k) {
			kkind := k.Kind()
			if kkind == reflect.Interface {
				kkind = k.Elem().Kind()
			}
			if kkind == reflect.Map || kkind == reflect.Slice {
				failf("invalid map key: %#v", k.Interface())
			}
			e := reflect.New(et).Elem()
			if d.unmarshal(n.children[i+1], e) {
				out.SetMapIndex(k, e)
			}
		}
	}
	d.mapType = mapType
	return true
}
// mappingSlice decodes a mapping node into a []MapItem, preserving the
// key order of the YAML document.
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
	outt := out.Type()
	if outt.Elem() != mapItemType {
		d.terror(n, yaml_MAP_TAG, out)
		return false
	}

	// Nested untyped mappings also decode as MapSlice while here;
	// restored before returning.
	mapType := d.mapType
	d.mapType = outt

	var slice []MapItem
	var l = len(n.children)
	for i := 0; i < l; i += 2 {
		if isMerge(n.children[i]) {
			d.merge(n.children[i+1], out)
			continue
		}
		item := MapItem{}
		k := reflect.ValueOf(&item.Key).Elem()
		if d.unmarshal(n.children[i], k) {
			v := reflect.ValueOf(&item.Value).Elem()
			if d.unmarshal(n.children[i+1], v) {
				slice = append(slice, item)
			}
		}
	}
	out.Set(reflect.ValueOf(slice))
	d.mapType = mapType
	return true
}
// mappingStruct decodes a mapping node into a struct using the cached
// field metadata from getStructInfo. Unknown keys are silently dropped
// unless the struct declares an ",inline" map field, which collects
// them.
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
	sinfo, err := getStructInfo(out.Type())
	if err != nil {
		panic(err)
	}
	name := settableValueOf("")
	l := len(n.children)

	var inlineMap reflect.Value
	var elemType reflect.Type
	if sinfo.InlineMap != -1 {
		inlineMap = out.Field(sinfo.InlineMap)
		// Reset to the zero map; allocated lazily on first unknown key.
		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
		elemType = inlineMap.Type().Elem()
	}

	for i := 0; i < l; i += 2 {
		ni := n.children[i]
		if isMerge(ni) {
			d.merge(n.children[i+1], out)
			continue
		}
		if !d.unmarshal(ni, name) {
			continue
		}
		if info, ok := sinfo.FieldsMap[name.String()]; ok {
			var field reflect.Value
			if info.Inline == nil {
				field = out.Field(info.Num)
			} else {
				field = out.FieldByIndex(info.Inline)
			}
			d.unmarshal(n.children[i+1], field)
		} else if sinfo.InlineMap != -1 {
			if inlineMap.IsNil() {
				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
			}
			value := reflect.New(elemType).Elem()
			d.unmarshal(n.children[i+1], value)
			inlineMap.SetMapIndex(name, value)
		}
	}
	return true
}
// failWantMap aborts decoding: a "<<" merge value must be a mapping or
// a sequence of mappings.
func failWantMap() {
	failf("map merge requires map or sequence of maps as the value")
}
// merge implements the YAML merge key ("<<"): n may be a mapping, an
// alias to a mapping, or a sequence of (aliases to) mappings, each of
// which is unmarshalled into out. Sequence entries are applied in
// reverse so earlier ones take precedence, per the merge-key spec.
func (d *decoder) merge(n *node, out reflect.Value) {
	switch n.kind {
	case mappingNode:
		d.unmarshal(n, out)
	case aliasNode:
		an, ok := d.doc.anchors[n.value]
		if ok && an.kind != mappingNode {
			failWantMap()
		}
		d.unmarshal(n, out)
	case sequenceNode:
		// Step backwards as earlier nodes take precedence.
		for i := len(n.children) - 1; i >= 0; i-- {
			ni := n.children[i]
			if ni.kind == aliasNode {
				an, ok := d.doc.anchors[ni.value]
				if ok && an.kind != mappingNode {
					failWantMap()
				}
			} else if ni.kind != mappingNode {
				failWantMap()
			}
			d.unmarshal(ni, out)
		}
	default:
		failWantMap()
	}
}
// isMerge reports whether n is the YAML merge key "<<", either written
// implicitly or explicitly tagged !!merge.
func isMerge(n *node) bool {
	if n.kind != scalarNode || n.value != "<<" {
		return false
	}
	return n.implicit || n.tag == yaml_MERGE_TAG
}

988
vendor/github.com/go-yaml/yaml/decode_test.go generated vendored Normal file
View File

@@ -0,0 +1,988 @@
package yaml_test
import (
"errors"
. "gopkg.in/check.v1"
"gopkg.in/yaml.v2"
"math"
"net"
"reflect"
"strings"
"time"
)
var unmarshalIntTest = 123
var unmarshalTests = []struct {
data string
value interface{}
}{
{
"",
&struct{}{},
}, {
"{}", &struct{}{},
}, {
"v: hi",
map[string]string{"v": "hi"},
}, {
"v: hi", map[string]interface{}{"v": "hi"},
}, {
"v: true",
map[string]string{"v": "true"},
}, {
"v: true",
map[string]interface{}{"v": true},
}, {
"v: 10",
map[string]interface{}{"v": 10},
}, {
"v: 0b10",
map[string]interface{}{"v": 2},
}, {
"v: 0xA",
map[string]interface{}{"v": 10},
}, {
"v: 4294967296",
map[string]int64{"v": 4294967296},
}, {
"v: 0.1",
map[string]interface{}{"v": 0.1},
}, {
"v: .1",
map[string]interface{}{"v": 0.1},
}, {
"v: .Inf",
map[string]interface{}{"v": math.Inf(+1)},
}, {
"v: -.Inf",
map[string]interface{}{"v": math.Inf(-1)},
}, {
"v: -10",
map[string]interface{}{"v": -10},
}, {
"v: -.1",
map[string]interface{}{"v": -0.1},
},
// Simple values.
{
"123",
&unmarshalIntTest,
},
// Floats from spec
{
"canonical: 6.8523e+5",
map[string]interface{}{"canonical": 6.8523e+5},
}, {
"expo: 685.230_15e+03",
map[string]interface{}{"expo": 685.23015e+03},
}, {
"fixed: 685_230.15",
map[string]interface{}{"fixed": 685230.15},
}, {
"neginf: -.inf",
map[string]interface{}{"neginf": math.Inf(-1)},
}, {
"fixed: 685_230.15",
map[string]float64{"fixed": 685230.15},
},
//{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
//{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
// Bools from spec
{
"canonical: y",
map[string]interface{}{"canonical": true},
}, {
"answer: NO",
map[string]interface{}{"answer": false},
}, {
"logical: True",
map[string]interface{}{"logical": true},
}, {
"option: on",
map[string]interface{}{"option": true},
}, {
"option: on",
map[string]bool{"option": true},
},
// Ints from spec
{
"canonical: 685230",
map[string]interface{}{"canonical": 685230},
}, {
"decimal: +685_230",
map[string]interface{}{"decimal": 685230},
}, {
"octal: 02472256",
map[string]interface{}{"octal": 685230},
}, {
"hexa: 0x_0A_74_AE",
map[string]interface{}{"hexa": 685230},
}, {
"bin: 0b1010_0111_0100_1010_1110",
map[string]interface{}{"bin": 685230},
}, {
"bin: -0b101010",
map[string]interface{}{"bin": -42},
}, {
"decimal: +685_230",
map[string]int{"decimal": 685230},
},
//{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
// Nulls from spec
{
"empty:",
map[string]interface{}{"empty": nil},
}, {
"canonical: ~",
map[string]interface{}{"canonical": nil},
}, {
"english: null",
map[string]interface{}{"english": nil},
}, {
"~: null key",
map[interface{}]string{nil: "null key"},
}, {
"empty:",
map[string]*bool{"empty": nil},
},
// Flow sequence
{
"seq: [A,B]",
map[string]interface{}{"seq": []interface{}{"A", "B"}},
}, {
"seq: [A,B,C,]",
map[string][]string{"seq": []string{"A", "B", "C"}},
}, {
"seq: [A,1,C]",
map[string][]string{"seq": []string{"A", "1", "C"}},
}, {
"seq: [A,1,C]",
map[string][]int{"seq": []int{1}},
}, {
"seq: [A,1,C]",
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
},
// Block sequence
{
"seq:\n - A\n - B",
map[string]interface{}{"seq": []interface{}{"A", "B"}},
}, {
"seq:\n - A\n - B\n - C",
map[string][]string{"seq": []string{"A", "B", "C"}},
}, {
"seq:\n - A\n - 1\n - C",
map[string][]string{"seq": []string{"A", "1", "C"}},
}, {
"seq:\n - A\n - 1\n - C",
map[string][]int{"seq": []int{1}},
}, {
"seq:\n - A\n - 1\n - C",
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
},
// Literal block scalar
{
"scalar: | # Comment\n\n literal\n\n \ttext\n\n",
map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
},
// Folded block scalar
{
"scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
},
// Map inside interface with no type hints.
{
"a: {b: c}",
map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
},
// Structs and type conversions.
{
"hello: world",
&struct{ Hello string }{"world"},
}, {
"a: {b: c}",
&struct{ A struct{ B string } }{struct{ B string }{"c"}},
}, {
"a: {b: c}",
&struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
}, {
"a: {b: c}",
&struct{ A map[string]string }{map[string]string{"b": "c"}},
}, {
"a: {b: c}",
&struct{ A *map[string]string }{&map[string]string{"b": "c"}},
}, {
"a:",
&struct{ A map[string]string }{},
}, {
"a: 1",
&struct{ A int }{1},
}, {
"a: 1",
&struct{ A float64 }{1},
}, {
"a: 1.0",
&struct{ A int }{1},
}, {
"a: 1.0",
&struct{ A uint }{1},
}, {
"a: [1, 2]",
&struct{ A []int }{[]int{1, 2}},
}, {
"a: 1",
&struct{ B int }{0},
}, {
"a: 1",
&struct {
B int "a"
}{1},
}, {
"a: y",
&struct{ A bool }{true},
},
// Some cross type conversions
{
"v: 42",
map[string]uint{"v": 42},
}, {
"v: -42",
map[string]uint{},
}, {
"v: 4294967296",
map[string]uint64{"v": 4294967296},
}, {
"v: -4294967296",
map[string]uint64{},
},
// int
{
"int_max: 2147483647",
map[string]int{"int_max": math.MaxInt32},
},
{
"int_min: -2147483648",
map[string]int{"int_min": math.MinInt32},
},
{
"int_overflow: 9223372036854775808", // math.MaxInt64 + 1
map[string]int{},
},
// int64
{
"int64_max: 9223372036854775807",
map[string]int64{"int64_max": math.MaxInt64},
},
{
"int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
map[string]int64{"int64_max_base2": math.MaxInt64},
},
{
"int64_min: -9223372036854775808",
map[string]int64{"int64_min": math.MinInt64},
},
{
"int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
map[string]int64{"int64_neg_base2": -math.MaxInt64},
},
{
"int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
map[string]int64{},
},
// uint
{
"uint_min: 0",
map[string]uint{"uint_min": 0},
},
{
"uint_max: 4294967295",
map[string]uint{"uint_max": math.MaxUint32},
},
{
"uint_underflow: -1",
map[string]uint{},
},
// uint64
{
"uint64_min: 0",
map[string]uint{"uint64_min": 0},
},
{
"uint64_max: 18446744073709551615",
map[string]uint64{"uint64_max": math.MaxUint64},
},
{
"uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
map[string]uint64{"uint64_max_base2": math.MaxUint64},
},
{
"uint64_maxint64: 9223372036854775807",
map[string]uint64{"uint64_maxint64": math.MaxInt64},
},
{
"uint64_underflow: -1",
map[string]uint64{},
},
// float32
{
"float32_max: 3.40282346638528859811704183484516925440e+38",
map[string]float32{"float32_max": math.MaxFloat32},
},
{
"float32_nonzero: 1.401298464324817070923729583289916131280e-45",
map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
},
{
"float32_maxuint64: 18446744073709551615",
map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
},
{
"float32_maxuint64+1: 18446744073709551616",
map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
},
// float64
{
"float64_max: 1.797693134862315708145274237317043567981e+308",
map[string]float64{"float64_max": math.MaxFloat64},
},
{
"float64_nonzero: 4.940656458412465441765687928682213723651e-324",
map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
},
{
"float64_maxuint64: 18446744073709551615",
map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
},
{
"float64_maxuint64+1: 18446744073709551616",
map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
},
// Overflow cases.
{
"v: 4294967297",
map[string]int32{},
}, {
"v: 128",
map[string]int8{},
},
// Quoted values.
{
"'1': '\"2\"'",
map[interface{}]interface{}{"1": "\"2\""},
}, {
"v:\n- A\n- 'B\n\n C'\n",
map[string][]string{"v": []string{"A", "B\nC"}},
},
// Explicit tags.
{
"v: !!float '1.1'",
map[string]interface{}{"v": 1.1},
}, {
"v: !!null ''",
map[string]interface{}{"v": nil},
}, {
"%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
map[string]interface{}{"v": 1},
},
// Anchors and aliases.
{
"a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
&struct{ A, B, C, D int }{1, 2, 1, 2},
}, {
"a: &a {c: 1}\nb: *a",
&struct {
A, B struct {
C int
}
}{struct{ C int }{1}, struct{ C int }{1}},
}, {
"a: &a [1, 2]\nb: *a",
&struct{ B []int }{[]int{1, 2}},
}, {
"b: *a\na: &a {c: 1}",
&struct {
A, B struct {
C int
}
}{struct{ C int }{1}, struct{ C int }{1}},
},
// Bug #1133337
{
"foo: ''",
map[string]*string{"foo": new(string)},
}, {
"foo: null",
map[string]string{"foo": ""},
}, {
"foo: null",
map[string]interface{}{"foo": nil},
},
// Ignored field
{
"a: 1\nb: 2\n",
&struct {
A int
B int "-"
}{1, 0},
},
// Bug #1191981
{
"" +
"%YAML 1.1\n" +
"--- !!str\n" +
`"Generic line break (no glyph)\n\` + "\n" +
` Generic line break (glyphed)\n\` + "\n" +
` Line separator\u2028\` + "\n" +
` Paragraph separator\u2029"` + "\n",
"" +
"Generic line break (no glyph)\n" +
"Generic line break (glyphed)\n" +
"Line separator\u2028Paragraph separator\u2029",
},
// Struct inlining
{
"a: 1\nb: 2\nc: 3\n",
&struct {
A int
C inlineB `yaml:",inline"`
}{1, inlineB{2, inlineC{3}}},
},
// Map inlining
{
"a: 1\nb: 2\nc: 3\n",
&struct {
A int
C map[string]int `yaml:",inline"`
}{1, map[string]int{"b": 2, "c": 3}},
},
// bug 1243827
{
"a: -b_c",
map[string]interface{}{"a": "-b_c"},
},
{
"a: +b_c",
map[string]interface{}{"a": "+b_c"},
},
{
"a: 50cent_of_dollar",
map[string]interface{}{"a": "50cent_of_dollar"},
},
// Duration
{
"a: 3s",
map[string]time.Duration{"a": 3 * time.Second},
},
// Issue #24.
{
"a: <foo>",
map[string]string{"a": "<foo>"},
},
// Base 60 floats are obsolete and unsupported.
{
"a: 1:1\n",
map[string]string{"a": "1:1"},
},
// Binary data.
{
"a: !!binary gIGC\n",
map[string]string{"a": "\x80\x81\x82"},
}, {
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
map[string]string{"a": strings.Repeat("\x90", 54)},
}, {
"a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
map[string]string{"a": strings.Repeat("\x00", 52)},
},
// Ordered maps.
{
"{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
},
// Issue #39.
{
"a:\n b:\n c: d\n",
map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
},
// Custom map type.
{
"a: {b: c}",
M{"a": M{"b": "c"}},
},
// Support encoding.TextUnmarshaler.
{
"a: 1.2.3.4\n",
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
},
{
"a: 2015-02-24T18:19:39Z\n",
map[string]time.Time{"a": time.Unix(1424801979, 0)},
},
// Encode empty lists as zero-length slices.
{
"a: []",
&struct{ A []int }{[]int{}},
},
// UTF-16-LE
{
"\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00",
M{"ñoño": "very yes"},
},
// UTF-16-LE with surrogate.
{
"\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00",
M{"ñoño": "very yes 🟔"},
},
// UTF-16-BE
{
"\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n",
M{"ñoño": "very yes"},
},
// UTF-16-BE with surrogate.
{
"\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n",
M{"ñoño": "very yes 🟔"},
},
}
// M is shorthand for the generic map type YAML decodes into by default.
type M map[interface{}]interface{}

// inlineB and inlineC exercise nested ",inline" struct embedding.
type inlineB struct {
	B       int
	inlineC `yaml:",inline"`
}

type inlineC struct {
	C int
}
// TestUnmarshal runs the unmarshalTests table: each input is decoded
// into a freshly constructed value of the expected result's type and
// compared with DeepEquals. *yaml.TypeError is tolerated since some
// table entries intentionally drop unconvertible elements.
func (s *S) TestUnmarshal(c *C) {
	for _, item := range unmarshalTests {
		t := reflect.ValueOf(item.value).Type()
		var value interface{}
		switch t.Kind() {
		case reflect.Map:
			value = reflect.MakeMap(t).Interface()
		case reflect.String:
			value = reflect.New(t).Interface()
		case reflect.Ptr:
			value = reflect.New(t.Elem()).Interface()
		default:
			c.Fatalf("missing case for %s", t)
		}
		err := yaml.Unmarshal([]byte(item.data), value)
		if _, ok := err.(*yaml.TypeError); !ok {
			c.Assert(err, IsNil)
		}
		if t.Kind() == reflect.String {
			c.Assert(*value.(*string), Equals, item.value)
		} else {
			c.Assert(value, DeepEquals, item.value)
		}
	}
}
// TestUnmarshalNaN checks .NaN separately because NaN != NaN breaks
// the DeepEquals comparison used by the table test.
func (s *S) TestUnmarshalNaN(c *C) {
	value := map[string]interface{}{}
	err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
	c.Assert(err, IsNil)
	c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
}
// unmarshalErrorTests pairs invalid inputs with the error text (a
// regexp, per gocheck's ErrorMatches) that Unmarshal must produce.
var unmarshalErrorTests = []struct {
	data, error string
}{
	{"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
	{"v: [A,", "yaml: line 1: did not find expected node content"},
	{"v:\n- [A,", "yaml: line 2: did not find expected node content"},
	{"a: *b\n", "yaml: unknown anchor 'b' referenced"},
	{"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"},
	{"value: -", "yaml: block sequence entries are not allowed in this context"},
	{"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
	{"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
	{"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
}
// TestUnmarshalErrors verifies each invalid input fails with the
// expected error message.
func (s *S) TestUnmarshalErrors(c *C) {
	for _, item := range unmarshalErrorTests {
		var value interface{}
		err := yaml.Unmarshal([]byte(item.data), &value)
		c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
	}
}
// unmarshalerTests feeds a value of every node kind, under the key
// "_", to a custom Unmarshaler.
var unmarshalerTests = []struct {
	data, tag string
	value     interface{}
}{
	{"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
	{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
	{"_: 10", "!!int", 10},
	{"_: null", "!!null", nil},
	{`_: BAR!`, "!!str", "BAR!"},
	{`_: "BAR!"`, "!!str", "BAR!"},
	{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
}
// unmarshalerResult lets tests inject an error to be returned when
// unmarshalerType decodes the given int.
var unmarshalerResult = map[int]error{}

// unmarshalerType captures whatever value UnmarshalYAML was handed.
type unmarshalerType struct {
	value interface{}
}

func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
	if err := unmarshal(&o.value); err != nil {
		return err
	}
	if i, ok := o.value.(int); ok {
		if result, ok := unmarshalerResult[i]; ok {
			return result
		}
	}
	return nil
}

// unmarshalerPointer and unmarshalerValue exercise a custom
// unmarshaler reached through a pointer field versus a value field.
type unmarshalerPointer struct {
	Field *unmarshalerType "_"
}

type unmarshalerValue struct {
	Field unmarshalerType "_"
}
// TestUnmarshalerPointerField checks that a *unmarshalerType field is
// left nil for null values and allocated and filled otherwise.
func (s *S) TestUnmarshalerPointerField(c *C) {
	for _, item := range unmarshalerTests {
		obj := &unmarshalerPointer{}
		err := yaml.Unmarshal([]byte(item.data), obj)
		c.Assert(err, IsNil)
		if item.value == nil {
			c.Assert(obj.Field, IsNil)
		} else {
			c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
			c.Assert(obj.Field.value, DeepEquals, item.value)
		}
	}
}
// TestUnmarshalerValueField checks custom unmarshalling into a value
// (non-pointer) field, which runs for every entry including null.
func (s *S) TestUnmarshalerValueField(c *C) {
	for _, item := range unmarshalerTests {
		obj := &unmarshalerValue{}
		err := yaml.Unmarshal([]byte(item.data), obj)
		c.Assert(err, IsNil)
		c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
		c.Assert(obj.Field.value, DeepEquals, item.value)
	}
}
// TestUnmarshalerWholeDocument decodes straight into an Unmarshaler at
// the top level, which then receives the entire document.
func (s *S) TestUnmarshalerWholeDocument(c *C) {
	obj := &unmarshalerType{}
	err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
	c.Assert(err, IsNil)
	value, ok := obj.value.(map[interface{}]interface{})
	c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
	c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
}
// TestUnmarshalerTypeError checks that TypeErrors returned by custom
// unmarshalers are merged with the decoder's own type errors and that
// the map entries whose unmarshaler failed stay nil.
func (s *S) TestUnmarshalerTypeError(c *C) {
	unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
	unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
	defer func() {
		delete(unmarshalerResult, 2)
		delete(unmarshalerResult, 4)
	}()

	type T struct {
		Before int
		After  int
		M      map[string]*unmarshalerType
	}
	var v T
	data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
	err := yaml.Unmarshal([]byte(data), &v)
	c.Assert(err, ErrorMatches, ""+
		"yaml: unmarshal errors:\n"+
		" line 1: cannot unmarshal !!str `A` into int\n"+
		" foo\n"+
		" bar\n"+
		" line 1: cannot unmarshal !!str `B` into int")
	c.Assert(v.M["abc"], NotNil)
	c.Assert(v.M["def"], IsNil)
	c.Assert(v.M["ghi"], NotNil)
	c.Assert(v.M["jkl"], IsNil)

	c.Assert(v.M["abc"].value, Equals, 1)
	c.Assert(v.M["ghi"].value, Equals, 3)
}
// proxyTypeError retries unmarshal with several target types,
// asserting which attempts must fail, and proxies the final type
// error back out.
type proxyTypeError struct{}

func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	var a int32
	var b int64
	if err := unmarshal(&s); err != nil {
		panic(err)
	}
	if s == "a" {
		if err := unmarshal(&b); err == nil {
			panic("should have failed")
		}
		return unmarshal(&a)
	}
	if err := unmarshal(&a); err == nil {
		panic("should have failed")
	}
	return unmarshal(&b)
}
// TestUnmarshalerTypeErrorProxying checks that type errors surfaced
// through a retrying Unmarshaler keep their original messages.
func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
	type T struct {
		Before int
		After  int
		M      map[string]*proxyTypeError
	}
	var v T
	data := `{before: A, m: {abc: a, def: b}, after: B}`
	err := yaml.Unmarshal([]byte(data), &v)
	c.Assert(err, ErrorMatches, ""+
		"yaml: unmarshal errors:\n"+
		" line 1: cannot unmarshal !!str `A` into int\n"+
		" line 1: cannot unmarshal !!str `a` into int32\n"+
		" line 1: cannot unmarshal !!str `b` into int64\n"+
		" line 1: cannot unmarshal !!str `B` into int")
}
// failingUnmarshaler always returns failingErr from UnmarshalYAML.
type failingUnmarshaler struct{}

var failingErr = errors.New("failingErr")

func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
	return failingErr
}
func (s *S) TestUnmarshalerError(c *C) {
err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
c.Assert(err, Equals, failingErr)
}
type sliceUnmarshaler []int
func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
var slice []int
err := unmarshal(&slice)
if err == nil {
*su = slice
return nil
}
var intVal int
err = unmarshal(&intVal)
if err == nil {
*su = []int{intVal}
return nil
}
return err
}
// TestUnmarshalerRetry checks that a custom unmarshaler may retry with a
// different target type after the first unmarshal attempt fails.
func (s *S) TestUnmarshalerRetry(c *C) {
	var su sliceUnmarshaler
	c.Assert(yaml.Unmarshal([]byte("[1, 2, 3]"), &su), IsNil)
	c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
	c.Assert(yaml.Unmarshal([]byte("1"), &su), IsNil)
	c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
}
// From http://yaml.org/type/merge.html
var mergeTests = `
anchors:
list:
- &CENTER { "x": 1, "y": 2 }
- &LEFT { "x": 0, "y": 2 }
- &BIG { "r": 10 }
- &SMALL { "r": 1 }
# All the following maps are equal:
plain:
# Explicit keys
"x": 1
"y": 2
"r": 10
label: center/big
mergeOne:
# Merge one map
<< : *CENTER
"r": 10
label: center/big
mergeMultiple:
# Merge multiple maps
<< : [ *CENTER, *BIG ]
label: center/big
override:
# Override
<< : [ *BIG, *LEFT, *SMALL ]
"x": 1
label: center/big
shortTag:
# Explicit short merge tag
!!merge "<<" : [ *CENTER, *BIG ]
label: center/big
longTag:
# Explicit merge long tag
!<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
label: center/big
inlineMap:
# Inlined map
<< : {"x": 1, "y": 2, "r": 10}
label: center/big
inlineSequenceMap:
# Inlined map in sequence
<< : [ *CENTER, {"r": 10} ]
label: center/big
`
// TestMerge decodes the mergeTests document into generic maps and checks
// that every variant of the "<<" merge key yields the same mapping.
func (s *S) TestMerge(c *C) {
	want := map[interface{}]interface{}{
		"x":     1,
		"y":     2,
		"r":     10,
		"label": "center/big",
	}
	var docs map[interface{}]interface{}
	c.Assert(yaml.Unmarshal([]byte(mergeTests), &docs), IsNil)
	for name, got := range docs {
		if name == "anchors" {
			// The anchors section only provides aliases; it is not expected
			// to equal the merged result.
			continue
		}
		c.Assert(got, DeepEquals, want, Commentf("test %q failed", name))
	}
}
// TestMergeStruct is the struct-typed counterpart of TestMerge: merge keys
// must populate struct fields the same way they populate maps.
func (s *S) TestMergeStruct(c *C) {
	type Data struct {
		X, Y, R int
		Label   string
	}
	want := Data{1, 2, 10, "center/big"}
	var docs map[string]Data
	err := yaml.Unmarshal([]byte(mergeTests), &docs)
	c.Assert(err, IsNil)
	for name, got := range docs {
		if name == "anchors" {
			continue
		}
		c.Assert(got, Equals, want, Commentf("test %q failed", name))
	}
}
// unmarshalNullTests builds decode targets that start with non-zero
// contents; TestUnmarshalNull decodes a YAML null into each and expects
// the target to be reset (zero value, or an emptied map for the
// by-value map case).
var unmarshalNullTests = []func() interface{}{
	func() interface{} { var v interface{}; v = "v"; return &v },
	func() interface{} { var s = "s"; return &s },
	func() interface{} { var s = "s"; sptr := &s; return &sptr },
	func() interface{} { var i = 1; return &i },
	func() interface{} { var i = 1; iptr := &i; return &iptr },
	func() interface{} { m := map[string]int{"s": 1}; return &m },
	func() interface{} { m := map[string]int{"s": 1}; return m },
}
// TestUnmarshalNull decodes "null" into each prepared target and checks it
// comes back as the type's zero value (an empty map for map targets).
func (s *S) TestUnmarshalNull(c *C) {
	for _, build := range unmarshalNullTests {
		target := build()
		zero := reflect.Zero(reflect.TypeOf(target).Elem()).Interface()
		c.Assert(yaml.Unmarshal([]byte("null"), target), IsNil)
		if reflect.TypeOf(target).Kind() == reflect.Map {
			// A map handed in by value is compared against a fresh empty
			// map of the same type rather than against the zero (nil) map.
			c.Assert(reflect.ValueOf(target).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(target)).Interface())
		} else {
			c.Assert(reflect.ValueOf(target).Elem().Interface(), DeepEquals, zero)
		}
	}
}
// TestUnmarshalSliceOnPreset checks that decoding into a pre-populated
// slice replaces its contents rather than appending (issue #48).
//
// Fix: the yaml.Unmarshal error was previously discarded; an unexpected
// parse failure would have made the DeepEquals assertion misleading.
func (s *S) TestUnmarshalSliceOnPreset(c *C) {
	// Issue #48.
	v := struct{ A []int }{[]int{1}}
	err := yaml.Unmarshal([]byte("a: [2]"), &v)
	c.Assert(err, IsNil)
	c.Assert(v.A, DeepEquals, []int{2})
}
//var data []byte
//func init() {
// var err error
// data, err = ioutil.ReadFile("/tmp/file.yaml")
// if err != nil {
// panic(err)
// }
//}
//
//func (s *S) BenchmarkUnmarshal(c *C) {
// var err error
// for i := 0; i < c.N; i++ {
// var v map[string]interface{}
// err = yaml.Unmarshal(data, &v)
// }
// if err != nil {
// panic(err)
// }
//}
//
//func (s *S) BenchmarkMarshal(c *C) {
// var v map[string]interface{}
// yaml.Unmarshal(data, &v)
// c.ResetTimer()
// for i := 0; i < c.N; i++ {
// yaml.Marshal(&v)
// }
//}

1685
vendor/github.com/go-yaml/yaml/emitterc.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

306
vendor/github.com/go-yaml/yaml/encode.go generated vendored Normal file
View File

@@ -0,0 +1,306 @@
package yaml
import (
"encoding"
"fmt"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
// encoder drives the low-level libyaml-style emitter, accumulating the
// rendered document into out.
type encoder struct {
	emitter yaml_emitter_t // emitter state machine
	event   yaml_event_t   // scratch event, consumed by each emit() call
	out     []byte         // rendered YAML output
	flow    bool           // one-shot flag: render the next collection in flow style
}
// newEncoder initializes the emitter, points its output at e.out, and emits
// the stream-start and document-start events so callers can immediately
// marshal values. The event order here is significant.
func newEncoder() (e *encoder) {
	e = &encoder{}
	e.must(yaml_emitter_initialize(&e.emitter))
	yaml_emitter_set_output_string(&e.emitter, &e.out)
	yaml_emitter_set_unicode(&e.emitter, true)
	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
	e.emit()
	// NOTE(review): the trailing true presumably marks the document start
	// as implicit (no "---" marker) — confirm against the event initializer.
	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
	e.emit()
	return e
}
// finish emits the document-end and stream-end events, completing the
// output buffer.
func (e *encoder) finish() {
	e.must(yaml_document_end_event_initialize(&e.event, true))
	e.emit()
	// NOTE(review): open_ended is cleared between document end and stream
	// end — presumably to suppress a trailing end-of-document marker;
	// confirm against the emitter implementation.
	e.emitter.open_ended = false
	e.must(yaml_stream_end_event_initialize(&e.event))
	e.emit()
}
// destroy releases the emitter's internal state.
func (e *encoder) destroy() {
	yaml_emitter_delete(&e.emitter)
}
// emit feeds the pending e.event to the emitter. Failures are fatal except
// for document-end and stream-end events, whose emit errors are ignored.
func (e *encoder) emit() {
	// This will internally delete the e.event value.
	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
		e.must(false)
	}
}
// must aborts encoding via failf when ok is false, preferring the emitter's
// recorded problem string as the message when one is available.
func (e *encoder) must(ok bool) {
	if ok {
		return
	}
	msg := e.emitter.problem
	if msg == "" {
		msg = "unknown problem generating YAML content"
	}
	failf("%s", msg)
}
// marshal encodes a single reflect.Value, dispatching on its kind. tag is
// an explicit YAML tag to attach ("" for none) and is threaded through to
// the scalar and collection emitters.
func (e *encoder) marshal(tag string, in reflect.Value) {
	if !in.IsValid() {
		e.nilv()
		return
	}
	iface := in.Interface()
	// Marshaler takes precedence over encoding.TextMarshaler; either one
	// replaces the value being encoded before the kind switch below.
	if m, ok := iface.(Marshaler); ok {
		v, err := m.MarshalYAML()
		if err != nil {
			fail(err)
		}
		if v == nil {
			e.nilv()
			return
		}
		in = reflect.ValueOf(v)
	} else if m, ok := iface.(encoding.TextMarshaler); ok {
		text, err := m.MarshalText()
		if err != nil {
			fail(err)
		}
		in = reflect.ValueOf(string(text))
	}
	switch in.Kind() {
	case reflect.Interface:
		if in.IsNil() {
			e.nilv()
		} else {
			e.marshal(tag, in.Elem())
		}
	case reflect.Map:
		e.mapv(tag, in)
	case reflect.Ptr:
		if in.IsNil() {
			e.nilv()
		} else {
			e.marshal(tag, in.Elem())
		}
	case reflect.Struct:
		e.structv(tag, in)
	case reflect.Slice:
		// Slices of MapItem preserve the caller's key order and render as
		// mappings rather than sequences.
		if in.Type().Elem() == mapItemType {
			e.itemsv(tag, in)
		} else {
			e.slicev(tag, in)
		}
	case reflect.String:
		e.stringv(tag, in)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// time.Duration renders via its String() form (e.g. "3s"), not as
		// a raw nanosecond count.
		if in.Type() == durationType {
			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
		} else {
			e.intv(tag, in)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		e.uintv(tag, in)
	case reflect.Float32, reflect.Float64:
		e.floatv(tag, in)
	case reflect.Bool:
		e.boolv(tag, in)
	default:
		panic("cannot marshal type: " + in.Type().String())
	}
}
// mapv encodes a Go map as a YAML mapping with deterministically sorted
// keys.
func (e *encoder) mapv(tag string, in reflect.Value) {
	e.mappingv(tag, func() {
		sorted := keyList(in.MapKeys())
		sort.Sort(sorted)
		for _, key := range sorted {
			e.marshal("", key)
			e.marshal("", in.MapIndex(key))
		}
	})
}
// itemsv encodes a []MapItem (MapSlice) as a mapping, preserving the
// caller-specified key order.
func (e *encoder) itemsv(tag string, in reflect.Value) {
	e.mappingv(tag, func() {
		for _, pair := range in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) {
			e.marshal("", reflect.ValueOf(pair.Key))
			e.marshal("", reflect.ValueOf(pair.Value))
		}
	})
}
// structv encodes a struct as a YAML mapping using the cached field
// metadata from getStructInfo (keys, inline fields, omitempty/flow flags).
func (e *encoder) structv(tag string, in reflect.Value) {
	sinfo, err := getStructInfo(in.Type())
	if err != nil {
		panic(err)
	}
	e.mappingv(tag, func() {
		for _, info := range sinfo.FieldsList {
			var value reflect.Value
			if info.Inline == nil {
				value = in.Field(info.Num)
			} else {
				// Inlined struct field: reach through the embedded path.
				value = in.FieldByIndex(info.Inline)
			}
			if info.OmitEmpty && isZero(value) {
				continue
			}
			e.marshal("", reflect.ValueOf(info.Key))
			// flow applies only to this field's value; the emitters that
			// honor it reset it afterwards.
			e.flow = info.Flow
			e.marshal("", value)
		}
		if sinfo.InlineMap >= 0 {
			// An inlined map contributes its entries as extra keys of this
			// mapping, in sorted order; a key that collides with a struct
			// field name is a programming error.
			m := in.Field(sinfo.InlineMap)
			if m.Len() > 0 {
				e.flow = false
				keys := keyList(m.MapKeys())
				sort.Sort(keys)
				for _, k := range keys {
					if _, found := sinfo.FieldsMap[k.String()]; found {
						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
					}
					e.marshal("", k)
					e.flow = false
					e.marshal("", m.MapIndex(k))
				}
			}
		}
	})
}
// mappingv emits a mapping-start/mapping-end event pair around f, honoring
// (and consuming) the one-shot flow flag.
func (e *encoder) mappingv(tag string, f func()) {
	style := yaml_BLOCK_MAPPING_STYLE
	if e.flow {
		e.flow = false
		style = yaml_FLOW_MAPPING_STYLE
	}
	implicit := tag == ""
	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
	e.emit()
	f()
	e.must(yaml_mapping_end_event_initialize(&e.event))
	e.emit()
}
// slicev encodes a slice value as a YAML sequence, honoring (and consuming)
// the one-shot flow flag.
func (e *encoder) slicev(tag string, in reflect.Value) {
	style := yaml_BLOCK_SEQUENCE_STYLE
	if e.flow {
		e.flow = false
		style = yaml_FLOW_SEQUENCE_STYLE
	}
	implicit := tag == ""
	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
	e.emit()
	for i, n := 0, in.Len(); i < n; i++ {
		e.marshal("", in.Index(i))
	}
	e.must(yaml_sequence_end_event_initialize(&e.event))
	e.emit()
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)

// isBase60Float reports whether s is in base 60 notation as defined in
// YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is
// unsupported in YAML 1.2 and by this package, but these should be
// marshalled quoted for the time being for compatibility with other
// parsers.
func isBase60Float(s string) bool {
	if len(s) == 0 {
		return false
	}
	// Cheap pre-checks before paying for the regexp: the first byte must be
	// a sign or a digit, and a ':' must appear somewhere.
	switch c := s[0]; {
	case c == '+' || c == '-':
	case c >= '0' && c <= '9':
	default:
		return false
	}
	if strings.IndexByte(s, ':') < 0 {
		return false
	}
	// Do the full match.
	return base60float.MatchString(s)
}
// stringv encodes a string scalar, choosing a style that survives a
// round-trip: strings that would resolve to a non-string type (or look
// like base 60 floats) are double-quoted, multi-line strings use literal
// style, and invalid UTF-8 falls back to the !!binary tag.
func (e *encoder) stringv(tag string, in reflect.Value) {
	var style yaml_scalar_style_t
	s := in.String()
	rtag, rs := resolve("", s)
	if rtag == yaml_BINARY_TAG {
		if tag == "" || tag == yaml_STR_TAG {
			// Adopt the binary tag and the encoded payload from resolve.
			tag = rtag
			s = rs.(string)
		} else if tag == yaml_BINARY_TAG {
			failf("explicitly tagged !!binary data must be base64-encoded")
		} else {
			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
		}
	}
	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
		// Quoting prevents the value from being re-read as bool/int/etc.
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	} else if strings.Contains(s, "\n") {
		style = yaml_LITERAL_SCALAR_STYLE
	} else {
		style = yaml_PLAIN_SCALAR_STYLE
	}
	e.emitScalar(s, "", tag, style)
}
// boolv encodes a boolean as a plain "true"/"false" scalar.
func (e *encoder) boolv(tag string, in reflect.Value) {
	s := "false"
	if in.Bool() {
		s = "true"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
// intv encodes a signed integer as a plain decimal scalar.
func (e *encoder) intv(tag string, in reflect.Value) {
	e.emitScalar(strconv.FormatInt(in.Int(), 10), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
// uintv encodes an unsigned integer as a plain decimal scalar.
func (e *encoder) uintv(tag string, in reflect.Value) {
	e.emitScalar(strconv.FormatUint(in.Uint(), 10), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
// floatv encodes a float as a plain scalar, rewriting Go's Inf/NaN
// spellings into their YAML forms.
//
// Fix for the previous FIXME: the precision passed to FormatFloat now
// matches the value's own width. Formatting a float64 at 32-bit precision
// (as before) silently rounded values that need more than ~7 significant
// digits; short values such as 0.1 render identically either way.
func (e *encoder) floatv(tag string, in reflect.Value) {
	precision := 64
	if in.Kind() == reflect.Float32 {
		precision = 32
	}
	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
	switch s {
	case "+Inf":
		s = ".inf"
	case "-Inf":
		s = "-.inf"
	case "NaN":
		s = ".nan"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
// nilv encodes a nil value as a plain, untagged "null" scalar.
func (e *encoder) nilv() {
	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
// emitScalar builds and emits a single scalar event. An empty tag makes the
// scalar implicit.
// NOTE(review): the same implicit flag is passed twice — presumably the
// plain- and quoted-implicit parameters of the event initializer; confirm.
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
	implicit := tag == ""
	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
	e.emit()
}

501
vendor/github.com/go-yaml/yaml/encode_test.go generated vendored Normal file
View File

@@ -0,0 +1,501 @@
package yaml_test
import (
"fmt"
"math"
"strconv"
"strings"
"time"
. "gopkg.in/check.v1"
"gopkg.in/yaml.v2"
"net"
"os"
)
var marshalIntTest = 123
var marshalTests = []struct {
value interface{}
data string
}{
{
nil,
"null\n",
}, {
&struct{}{},
"{}\n",
}, {
map[string]string{"v": "hi"},
"v: hi\n",
}, {
map[string]interface{}{"v": "hi"},
"v: hi\n",
}, {
map[string]string{"v": "true"},
"v: \"true\"\n",
}, {
map[string]string{"v": "false"},
"v: \"false\"\n",
}, {
map[string]interface{}{"v": true},
"v: true\n",
}, {
map[string]interface{}{"v": false},
"v: false\n",
}, {
map[string]interface{}{"v": 10},
"v: 10\n",
}, {
map[string]interface{}{"v": -10},
"v: -10\n",
}, {
map[string]uint{"v": 42},
"v: 42\n",
}, {
map[string]interface{}{"v": int64(4294967296)},
"v: 4294967296\n",
}, {
map[string]int64{"v": int64(4294967296)},
"v: 4294967296\n",
}, {
map[string]uint64{"v": 4294967296},
"v: 4294967296\n",
}, {
map[string]interface{}{"v": "10"},
"v: \"10\"\n",
}, {
map[string]interface{}{"v": 0.1},
"v: 0.1\n",
}, {
map[string]interface{}{"v": float64(0.1)},
"v: 0.1\n",
}, {
map[string]interface{}{"v": -0.1},
"v: -0.1\n",
}, {
map[string]interface{}{"v": math.Inf(+1)},
"v: .inf\n",
}, {
map[string]interface{}{"v": math.Inf(-1)},
"v: -.inf\n",
}, {
map[string]interface{}{"v": math.NaN()},
"v: .nan\n",
}, {
map[string]interface{}{"v": nil},
"v: null\n",
}, {
map[string]interface{}{"v": ""},
"v: \"\"\n",
}, {
map[string][]string{"v": []string{"A", "B"}},
"v:\n- A\n- B\n",
}, {
map[string][]string{"v": []string{"A", "B\nC"}},
"v:\n- A\n- |-\n B\n C\n",
}, {
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
}, {
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
"a:\n b: c\n",
}, {
map[string]interface{}{"a": "-"},
"a: '-'\n",
},
// Simple values.
{
&marshalIntTest,
"123\n",
},
// Structures
{
&struct{ Hello string }{"world"},
"hello: world\n",
}, {
&struct {
A struct {
B string
}
}{struct{ B string }{"c"}},
"a:\n b: c\n",
}, {
&struct {
A *struct {
B string
}
}{&struct{ B string }{"c"}},
"a:\n b: c\n",
}, {
&struct {
A *struct {
B string
}
}{},
"a: null\n",
}, {
&struct{ A int }{1},
"a: 1\n",
}, {
&struct{ A []int }{[]int{1, 2}},
"a:\n- 1\n- 2\n",
}, {
&struct {
B int "a"
}{1},
"a: 1\n",
}, {
&struct{ A bool }{true},
"a: true\n",
},
// Conditional flag
{
&struct {
A int "a,omitempty"
B int "b,omitempty"
}{1, 0},
"a: 1\n",
}, {
&struct {
A int "a,omitempty"
B int "b,omitempty"
}{0, 0},
"{}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{&struct{ X, y int }{1, 2}},
"a: {x: 1}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{nil},
"{}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{&struct{ X, y int }{}},
"a: {x: 0}\n",
}, {
&struct {
A struct{ X, y int } "a,omitempty,flow"
}{struct{ X, y int }{1, 2}},
"a: {x: 1}\n",
}, {
&struct {
A struct{ X, y int } "a,omitempty,flow"
}{struct{ X, y int }{0, 1}},
"{}\n",
}, {
&struct {
A float64 "a,omitempty"
B float64 "b,omitempty"
}{1, 0},
"a: 1\n",
},
// Flow flag
{
&struct {
A []int "a,flow"
}{[]int{1, 2}},
"a: [1, 2]\n",
}, {
&struct {
A map[string]string "a,flow"
}{map[string]string{"b": "c", "d": "e"}},
"a: {b: c, d: e}\n",
}, {
&struct {
A struct {
B, D string
} "a,flow"
}{struct{ B, D string }{"c", "e"}},
"a: {b: c, d: e}\n",
},
// Unexported field
{
&struct {
u int
A int
}{0, 1},
"a: 1\n",
},
// Ignored field
{
&struct {
A int
B int "-"
}{1, 2},
"a: 1\n",
},
// Struct inlining
{
&struct {
A int
C inlineB `yaml:",inline"`
}{1, inlineB{2, inlineC{3}}},
"a: 1\nb: 2\nc: 3\n",
},
// Map inlining
{
&struct {
A int
C map[string]int `yaml:",inline"`
}{1, map[string]int{"b": 2, "c": 3}},
"a: 1\nb: 2\nc: 3\n",
},
// Duration
{
map[string]time.Duration{"a": 3 * time.Second},
"a: 3s\n",
},
// Issue #24: bug in map merging logic.
{
map[string]string{"a": "<foo>"},
"a: <foo>\n",
},
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
// with old YAML 1.1 parsers.
{
map[string]string{"a": "1:1"},
"a: \"1:1\"\n",
},
// Binary data.
{
map[string]string{"a": "\x00"},
"a: \"\\0\"\n",
}, {
map[string]string{"a": "\x80\x81\x82"},
"a: !!binary gIGC\n",
}, {
map[string]string{"a": strings.Repeat("\x90", 54)},
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
},
// Ordered maps.
{
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
},
// Encode unicode as utf-8 rather than in escaped form.
{
map[string]string{"a": "你好"},
"a: 你好\n",
},
// Support encoding.TextMarshaler.
{
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
"a: 1.2.3.4\n",
},
{
map[string]time.Time{"a": time.Unix(1424801979, 0)},
"a: 2015-02-24T18:19:39Z\n",
},
// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
{
map[string]string{"a": "b: c"},
"a: 'b: c'\n",
},
// Containing hash mark ('#') in string should be quoted
{
map[string]string{"a": "Hello #comment"},
"a: 'Hello #comment'\n",
},
{
map[string]string{"a": "你好 #comment"},
"a: '你好 #comment'\n",
},
}
// TestMarshal runs the marshalTests table, pinning TZ to UTC so time.Time
// values render deterministically and restoring the caller's setting after.
func (s *S) TestMarshal(c *C) {
	defer os.Setenv("TZ", os.Getenv("TZ"))
	os.Setenv("TZ", "UTC")
	for _, tc := range marshalTests {
		got, err := yaml.Marshal(tc.value)
		c.Assert(err, IsNil)
		c.Assert(string(got), Equals, tc.data)
	}
}
var marshalErrorTests = []struct {
value interface{}
error string
panic string
}{{
value: &struct {
B int
inlineB ",inline"
}{1, inlineB{2, inlineC{3}}},
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
}, {
value: &struct {
A int
B map[string]int ",inline"
}{1, map[string]int{"a": 2}},
panic: `Can't have key "a" in inlined map; conflicts with struct field`,
}}
// TestMarshalErrors runs the marshalErrorTests table; entries with a panic
// pattern must panic, the rest must return a matching error.
func (s *S) TestMarshalErrors(c *C) {
	for _, tc := range marshalErrorTests {
		if tc.panic == "" {
			_, err := yaml.Marshal(tc.value)
			c.Assert(err, ErrorMatches, tc.error)
			continue
		}
		c.Assert(func() { yaml.Marshal(tc.value) }, PanicMatches, tc.panic)
	}
}
// TestMarshalTypeCache ensures the per-type field-info cache does not
// conflate two distinct local struct types that share the same name.
func (s *S) TestMarshalTypeCache(c *C) {
	var data []byte
	var err error
	func() {
		type T struct{ A int }
		data, err = yaml.Marshal(&T{})
		c.Assert(err, IsNil)
	}()
	func() {
		type T struct{ B int }
		data, err = yaml.Marshal(&T{})
		c.Assert(err, IsNil)
	}()
	// Marshalling the second T must reflect its own field, not the first's.
	c.Assert(string(data), Equals, "b: 0\n")
}
var marshalerTests = []struct {
data string
value interface{}
}{
{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
{"_: 10\n", 10},
{"_: null\n", nil},
{"_: BAR!\n", "BAR!"},
}
type marshalerType struct {
value interface{}
}
func (o marshalerType) MarshalText() ([]byte, error) {
panic("MarshalText called on type with MarshalYAML")
}
func (o marshalerType) MarshalYAML() (interface{}, error) {
return o.value, nil
}
type marshalerValue struct {
Field marshalerType "_"
}
// TestMarshaler runs the marshalerTests table through the "_"-keyed
// wrapper, checking MarshalYAML values render as expected.
func (s *S) TestMarshaler(c *C) {
	for _, tc := range marshalerTests {
		wrapper := &marshalerValue{}
		wrapper.Field.value = tc.value
		out, err := yaml.Marshal(wrapper)
		c.Assert(err, IsNil)
		c.Assert(string(out), Equals, string(tc.data))
	}
}
// TestMarshalerWholeDocument checks MarshalYAML on the top-level value
// (not a field) drives the entire document.
func (s *S) TestMarshalerWholeDocument(c *C) {
	obj := &marshalerType{value: map[string]string{"hello": "world!"}}
	data, err := yaml.Marshal(obj)
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "hello: world!\n")
}
// failingMarshaler always fails with failingErr so the test can check that
// a MarshalYAML error aborts encoding and surfaces unchanged.
type failingMarshaler struct{}

func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
	return nil, failingErr
}
func (s *S) TestMarshalerError(c *C) {
	_, err := yaml.Marshal(&failingMarshaler{})
	c.Assert(err, Equals, failingErr)
}
func (s *S) TestSortedOutput(c *C) {
order := []interface{}{
false,
true,
1,
uint(1),
1.0,
1.1,
1.2,
2,
uint(2),
2.0,
2.1,
"",
".1",
".2",
".a",
"1",
"2",
"a!10",
"a/2",
"a/10",
"a~10",
"ab/1",
"b/1",
"b/01",
"b/2",
"b/02",
"b/3",
"b/03",
"b1",
"b01",
"b3",
"c2.10",
"c10.2",
"d1",
"d12",
"d12a",
}
m := make(map[interface{}]int)
for _, k := range order {
m[k] = 1
}
data, err := yaml.Marshal(m)
c.Assert(err, IsNil)
out := "\n" + string(data)
last := 0
for i, k := range order {
repr := fmt.Sprint(k)
if s, ok := k.(string); ok {
if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
repr = `"` + repr + `"`
}
}
index := strings.Index(out, "\n"+repr+":")
if index == -1 {
c.Fatalf("%#v is not in the output: %#v", k, out)
}
if index < last {
c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
}
last = index
}
}

1096
vendor/github.com/go-yaml/yaml/parserc.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

394
vendor/github.com/go-yaml/yaml/readerc.go generated vendored Normal file
View File

@@ -0,0 +1,394 @@
package yaml
import (
"io"
)
// Set the reader error and return 0.
// The offending stream offset and byte/character value (or -1 when not
// applicable) are recorded on the parser for later error reporting; the
// false return lets callers propagate the failure directly.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
	parser.error = yaml_READER_ERROR
	parser.problem = problem
	parser.problem_offset = offset
	parser.problem_value = value
	return false
}
// Byte order marks recognized at the start of the input stream.
const (
	bom_UTF8    = "\xef\xbb\xbf"
	bom_UTF16LE = "\xff\xfe"
	bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
	// Ensure that we had enough bytes in the raw buffer.
	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
		if !yaml_parser_update_raw_buffer(parser) {
			return false
		}
	}
	// Determine the encoding.
	buf := parser.raw_buffer
	pos := parser.raw_buffer_pos
	avail := len(buf) - pos
	// A recognized BOM is consumed by advancing both the raw-buffer
	// position and the stream offset; an absent BOM consumes nothing.
	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
		parser.encoding = yaml_UTF16LE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
		parser.encoding = yaml_UTF16BE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
		parser.encoding = yaml_UTF8_ENCODING
		parser.raw_buffer_pos += 3
		parser.offset += 3
	} else {
		// No BOM: default to UTF-8.
		parser.encoding = yaml_UTF8_ENCODING
	}
	return true
}
// Update the raw buffer.
// Tops up parser.raw_buffer from the read handler, compacting
// already-consumed bytes first. EOF is recorded in parser.eof and reported
// as success; only a genuine read error returns false.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
	size_read := 0
	// Return if the raw buffer is full.
	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
		return true
	}
	// Return on EOF.
	if parser.eof {
		return true
	}
	// Move the remaining bytes in the raw buffer to the beginning.
	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
	}
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
	parser.raw_buffer_pos = 0
	// Call the read handler to fill the buffer.
	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
	if err == io.EOF {
		parser.eof = true
	} else if err != nil {
		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
	}
	return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less that the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
parser.buffer = parser.buffer[:buffer_len]
return true
}

203
vendor/github.com/go-yaml/yaml/resolve.go generated vendored Normal file
View File

@@ -0,0 +1,203 @@
package yaml
import (
"encoding/base64"
"math"
"strconv"
"strings"
"unicode/utf8"
)
// resolveMapItem pairs a resolved Go value with its canonical YAML tag.
type resolveMapItem struct {
	value interface{}
	tag   string
}
// resolveTable maps a scalar's first byte to a coarse type hint ('S'ign,
// 'D'igit, 'M' possibly-in-map, '.' possible float); 0 means plain string.
var resolveTable = make([]byte, 256)

// resolveMap maps exact scalar spellings (true/false/null/.inf/...) to
// their resolved value and tag.
var resolveMap = make(map[string]resolveMapItem)

func init() {
	t := resolveTable
	t[int('+')] = 'S' // Sign
	t[int('-')] = 'S'
	for _, c := range "0123456789" {
		t[int(c)] = 'D' // Digit
	}
	// First letters of the boolean/null spellings listed below.
	for _, c := range "yYnNtTfFoO~" {
		t[int(c)] = 'M' // In map
	}
	t[int('.')] = '.' // Float (potentially in map)
	var resolveMapList = []struct {
		v   interface{}
		tag string
		l   []string
	}{
		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
		{"<<", yaml_MERGE_TAG, []string{"<<"}},
	}
	m := resolveMap
	for _, item := range resolveMapList {
		for _, s := range item.l {
			m[s] = resolveMapItem{item.v, item.tag}
		}
	}
}
const longTagPrefix = "tag:yaml.org,2002:"

// shortTag rewrites a "tag:yaml.org,2002:str" style tag into "!!str" form;
// any other tag is passed through unchanged.
func shortTag(tag string) string {
	// TODO This can easily be made faster and produce less garbage.
	if !strings.HasPrefix(tag, longTagPrefix) {
		return tag
	}
	return "!!" + strings.TrimPrefix(tag, longTagPrefix)
}

// longTag is the inverse of shortTag, expanding a leading "!!" into the
// full yaml.org prefix.
func longTag(tag string) string {
	if !strings.HasPrefix(tag, "!!") {
		return tag
	}
	return longTagPrefix + tag[2:]
}
// resolvableTag reports whether resolve knows how to interpret scalars of
// the given tag. Only the core scalar tags (plus the empty "guess" tag)
// qualify.
func resolvableTag(tag string) bool {
	switch tag {
	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
		return true
	default:
		return false
	}
}
// resolve deduces the type of the plain scalar `in` and returns its
// canonical tag together with the decoded Go value. A non-empty `tag`
// means the caller requested a specific type; the deferred check fails
// (via failf) when the resolved type is incompatible with that request.
// rtag is a named result precisely so the deferred closure can inspect it.
func resolve(tag string, in string) (rtag string, out interface{}) {
	if !resolvableTag(tag) {
		return tag, in
	}
	defer func() {
		// An empty tag, an exact match, or the always-compatible
		// str/binary tags are fine; anything else is a type mismatch.
		switch tag {
		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
			return
		}
		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
	}()
	// Any data is accepted as a !!str or !!binary.
	// Otherwise, the prefix is enough of a hint about what it might be.
	hint := byte('N')
	if in != "" {
		hint = resolveTable[in[0]]
	}
	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
		// Handle things we can lookup in a map.
		if item, ok := resolveMap[in]; ok {
			return item.tag, item.value
		}
		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
		// are purposefully unsupported here. They're still quoted on
		// the way out for compatibility with other parser, though.
		switch hint {
		case 'M':
			// We've already checked the map above.
		case '.':
			// Not in the map, so maybe a normal float.
			floatv, err := strconv.ParseFloat(in, 64)
			if err == nil {
				return yaml_FLOAT_TAG, floatv
			}
		case 'D', 'S':
			// Int, float, or timestamp.
			// Underscores are legal digit separators in YAML 1.1 numbers.
			plain := strings.Replace(in, "_", "", -1)
			intv, err := strconv.ParseInt(plain, 0, 64)
			if err == nil {
				// Prefer the native int type when the value fits, falling
				// back to int64 for larger magnitudes.
				if intv == int64(int(intv)) {
					return yaml_INT_TAG, int(intv)
				} else {
					return yaml_INT_TAG, intv
				}
			}
			// Too big for int64; it may still fit in uint64.
			uintv, err := strconv.ParseUint(plain, 0, 64)
			if err == nil {
				return yaml_INT_TAG, uintv
			}
			floatv, err := strconv.ParseFloat(plain, 64)
			if err == nil {
				return yaml_FLOAT_TAG, floatv
			}
			// Binary literals (0b...) are not covered by strconv's base-0
			// detection above, so parse them explicitly, including the
			// negative form.
			if strings.HasPrefix(plain, "0b") {
				intv, err := strconv.ParseInt(plain[2:], 2, 64)
				if err == nil {
					if intv == int64(int(intv)) {
						return yaml_INT_TAG, int(intv)
					} else {
						return yaml_INT_TAG, intv
					}
				}
				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
				if err == nil {
					return yaml_INT_TAG, uintv
				}
			} else if strings.HasPrefix(plain, "-0b") {
				intv, err := strconv.ParseInt(plain[3:], 2, 64)
				if err == nil {
					if intv == int64(int(intv)) {
						return yaml_INT_TAG, -int(intv)
					} else {
						return yaml_INT_TAG, -intv
					}
				}
			}
			// XXX Handle timestamps here.
		default:
			// Unreachable unless resolveTable gains a hint byte that this
			// switch was not taught about.
			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
		}
	}
	if tag == yaml_BINARY_TAG {
		return yaml_BINARY_TAG, in
	}
	// Valid UTF-8 stays a string; arbitrary bytes round-trip as !!binary.
	if utf8.ValidString(in) {
		return yaml_STR_TAG, in
	}
	return yaml_BINARY_TAG, encodeBase64(in)
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}

Some files were not shown because too many files have changed in this diff Show More