Mirror of https://github.com/outbackdingo/matchbox.git (synced 2026-01-27 10:19:35 +00:00)
vendor: Update Ignition and Fuze version
glide.lock (22 changed lines, generated)

@@ -1,20 +1,23 @@
-hash: 1f2a602a6a995b4f8eb93ad0fa3ef5734b57078366e5969237d117d02146db88
-updated: 2016-09-07T11:42:59.832174164-07:00
+hash: ab93fb738044e38ceeca7b0c8078bd00b48f1d345e9597a42a0451cd48410d3b
+updated: 2016-12-11T21:07:54.854170864-08:00
imports:
- name: github.com/ajeddeloh/go-json
  version: 73d058cf8437a1989030afe571eeab9f90eebbbd
- name: github.com/ajeddeloh/yaml
  version: 1072abfea31191db507785e2e0c1b8d1440d35a5
- name: github.com/alecthomas/units
  version: 2efee857e7cfd4f3d0138cc3cbb1b4966962b93a
- name: github.com/camlistore/camlistore
  version: 9106ce829629773474c689b34aacd7d3aaa99426
  subpackages:
  - pkg/errorutil
- name: github.com/coreos/coreos-cloudinit
  version: 4c333e657bfbaa8f6594298b48324f45e6bf5961
  subpackages:
  - config
- name: github.com/coreos/fuze
-  version: 7df4f06041d9daba45e4c68221b9b04203dff1d8
+  version: 63c72bc1c8875f7f4ca11800a1a8de0478a69a12
  subpackages:
  - config
  - config/types
- name: github.com/coreos/go-semver
  version: 294930c1e79c64e7dbe360054274fdad492c8cf5
  subpackages:
@@ -24,12 +27,15 @@ imports:
  subpackages:
  - journal
- name: github.com/coreos/ignition
-  version: b6850837b3b9bd17b673e58b5c406b5e4192ddca
+  version: 3ffd793b1292c6b0b3519bce214bdb41f336faa7
  subpackages:
  - config
  - config/types
  - config/v1
  - config/v1/types
  - config/validate
  - config/validate/astjson
  - config/validate/report
- name: github.com/coreos/pkg
  version: 66fe44ad037ccb80329115cb4db0dbe8e9beb03a
  subpackages:
@@ -40,8 +46,6 @@ imports:
  version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
  subpackages:
  - spew
-- name: github.com/go-yaml/yaml
-  version: a83829b6f1293c91addabc89d0571c246397bbf4
- name: github.com/golang/protobuf
  version: 7cc19b78d562895b13596ddce7aafb59dd789318
  subpackages:
@@ -89,7 +93,7 @@ imports:
  - internal/timeseries
  - trace
- name: golang.org/x/sys
-  version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03
+  version: 478fcf54317e52ab69f40bb4c7a1520288d7f7ea
  subpackages:
  - unix
- name: google.golang.org/grpc
glide.yaml (17 changed lines)

@@ -9,13 +9,24 @@ import:
  subpackages:
  - codes
- package: github.com/coreos/ignition
-  version: b6850837b3b9bd17b673e58b5c406b5e4192ddca
+  version: 3ffd793b1292c6b0b3519bce214bdb41f336faa7
  subpackages:
  - config
  - config/types
  - config/v1
  - config/v1/types
  - config/validate
  - config/validate/astjson
  - config/validate/report
- package: github.com/coreos/fuze
-  version: 7df4f06041d9daba45e4c68221b9b04203dff1d8
+  version: 63c72bc1c8875f7f4ca11800a1a8de0478a69a12
  subpackages:
  - config
  - config/types
- package: github.com/ajeddeloh/yaml
  version: 1072abfea31191db507785e2e0c1b8d1440d35a5
- package: github.com/vincent-petithory/dataurl
  version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
- package: github.com/coreos/coreos-cloudinit
  version: v1.11.0
  subpackages:
@@ -72,8 +83,6 @@ import:
  - difflib
- package: github.com/spf13/pflag
  version: 7f60f83a2c81bc3c3c0d5297f61ddfa68da9d3b7
-- package: github.com/vincent-petithory/dataurl
-  version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
- package: go4.org
  version: 03efcb870d84809319ea509714dd6d19a1498483
  subpackages:
vendor/github.com/ajeddeloh/go-json/README (10 lines, generated, vendored, new file)

@@ -0,0 +1,10 @@
This is a fork of Go's encoding/json library. It adds a third unmarshalling target, json.Node.
Unmarshalling to a Node behaves similarly to unmarshalling to an interface{}, except that it also
records the offsets of the start and end of the value that was unmarshalled and, if the value was
part of a JSON object, the offsets of the start and end of the object's key. The Value field of the
Node is unmarshalled to the same types as an interface{} would be, except in the case of arrays and
objects, which are unmarshalled to []Node and map[string]Node instead of []interface{} and
map[string]interface{}, respectively.

There are two branches, go15 and go16. go15 contains the modified go1.5 library and go16 contains
the modified go1.6 library.
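For illustration, a minimal sketch of the behaviour the README describes, assuming the fork is imported as github.com/ajeddeloh/go-json and that Node exposes its recorded offsets as Start/End and KeyStart/KeyEnd (the offset field names are an assumption; only the Value field is named above):

package main

import (
	"fmt"

	json "github.com/ajeddeloh/go-json"
)

func main() {
	data := []byte(`{"name": "matchbox"}`)

	// Unmarshalling into a Node works like unmarshalling into an interface{},
	// except objects become map[string]Node and arrays become []Node, and each
	// Node additionally carries byte offsets into the original document.
	var root json.Node
	if err := json.Unmarshal(data, &root); err != nil {
		panic(err)
	}

	obj, ok := root.Value.(map[string]json.Node)
	if !ok {
		panic("expected a JSON object at the top level")
	}

	name := obj["name"]
	fmt.Printf("value: %v\n", name.Value)
	// Offset fields (names assumed): Start/End bound the value "matchbox",
	// KeyStart/KeyEnd bound the key "name".
	fmt.Printf("value bytes [%d,%d), key bytes [%d,%d)\n",
		name.Start, name.End, name.KeyStart, name.KeyEnd)
}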
vendor/github.com/ajeddeloh/go-json/bench_test.go (223 lines, generated, vendored, new file)

@@ -0,0 +1,223 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Large data benchmark.
|
||||
// The JSON data is a summary of agl's changes in the
|
||||
// go, webkit, and chromium open source projects.
|
||||
// We benchmark converting between the JSON form
|
||||
// and in-memory data structures.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type codeResponse struct {
|
||||
Tree *codeNode `json:"tree"`
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
type codeNode struct {
|
||||
Name string `json:"name"`
|
||||
Kids []*codeNode `json:"kids"`
|
||||
CLWeight float64 `json:"cl_weight"`
|
||||
Touches int `json:"touches"`
|
||||
MinT int64 `json:"min_t"`
|
||||
MaxT int64 `json:"max_t"`
|
||||
MeanT int64 `json:"mean_t"`
|
||||
}
|
||||
|
||||
var codeJSON []byte
|
||||
var codeStruct codeResponse
|
||||
|
||||
func codeInit() {
|
||||
f, err := os.Open("testdata/code.json.gz")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer f.Close()
|
||||
gz, err := gzip.NewReader(f)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
data, err := ioutil.ReadAll(gz)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
codeJSON = data
|
||||
|
||||
if err := Unmarshal(codeJSON, &codeStruct); err != nil {
|
||||
panic("unmarshal code.json: " + err.Error())
|
||||
}
|
||||
|
||||
if data, err = Marshal(&codeStruct); err != nil {
|
||||
panic("marshal code.json: " + err.Error())
|
||||
}
|
||||
|
||||
if !bytes.Equal(data, codeJSON) {
|
||||
println("different lengths", len(data), len(codeJSON))
|
||||
for i := 0; i < len(data) && i < len(codeJSON); i++ {
|
||||
if data[i] != codeJSON[i] {
|
||||
println("re-marshal: changed at byte", i)
|
||||
println("orig: ", string(codeJSON[i-10:i+10]))
|
||||
println("new: ", string(data[i-10:i+10]))
|
||||
break
|
||||
}
|
||||
}
|
||||
panic("re-marshal code.json: different result")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCodeEncoder(b *testing.B) {
|
||||
if codeJSON == nil {
|
||||
b.StopTimer()
|
||||
codeInit()
|
||||
b.StartTimer()
|
||||
}
|
||||
enc := NewEncoder(ioutil.Discard)
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := enc.Encode(&codeStruct); err != nil {
|
||||
b.Fatal("Encode:", err)
|
||||
}
|
||||
}
|
||||
b.SetBytes(int64(len(codeJSON)))
|
||||
}
|
||||
|
||||
func BenchmarkCodeMarshal(b *testing.B) {
|
||||
if codeJSON == nil {
|
||||
b.StopTimer()
|
||||
codeInit()
|
||||
b.StartTimer()
|
||||
}
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := Marshal(&codeStruct); err != nil {
|
||||
b.Fatal("Marshal:", err)
|
||||
}
|
||||
}
|
||||
b.SetBytes(int64(len(codeJSON)))
|
||||
}
|
||||
|
||||
func BenchmarkCodeDecoder(b *testing.B) {
|
||||
if codeJSON == nil {
|
||||
b.StopTimer()
|
||||
codeInit()
|
||||
b.StartTimer()
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
dec := NewDecoder(&buf)
|
||||
var r codeResponse
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf.Write(codeJSON)
|
||||
// hide EOF
|
||||
buf.WriteByte('\n')
|
||||
buf.WriteByte('\n')
|
||||
buf.WriteByte('\n')
|
||||
if err := dec.Decode(&r); err != nil {
|
||||
b.Fatal("Decode:", err)
|
||||
}
|
||||
}
|
||||
b.SetBytes(int64(len(codeJSON)))
|
||||
}
|
||||
|
||||
func BenchmarkDecoderStream(b *testing.B) {
|
||||
b.StopTimer()
|
||||
var buf bytes.Buffer
|
||||
dec := NewDecoder(&buf)
|
||||
buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
|
||||
var x interface{}
|
||||
if err := dec.Decode(&x); err != nil {
|
||||
b.Fatal("Decode:", err)
|
||||
}
|
||||
ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if i%300000 == 0 {
|
||||
buf.WriteString(ones)
|
||||
}
|
||||
x = nil
|
||||
if err := dec.Decode(&x); err != nil || x != 1.0 {
|
||||
b.Fatalf("Decode: %v after %d", err, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCodeUnmarshal(b *testing.B) {
|
||||
if codeJSON == nil {
|
||||
b.StopTimer()
|
||||
codeInit()
|
||||
b.StartTimer()
|
||||
}
|
||||
for i := 0; i < b.N; i++ {
|
||||
var r codeResponse
|
||||
if err := Unmarshal(codeJSON, &r); err != nil {
|
||||
b.Fatal("Unmmarshal:", err)
|
||||
}
|
||||
}
|
||||
b.SetBytes(int64(len(codeJSON)))
|
||||
}
|
||||
|
||||
func BenchmarkCodeUnmarshalReuse(b *testing.B) {
|
||||
if codeJSON == nil {
|
||||
b.StopTimer()
|
||||
codeInit()
|
||||
b.StartTimer()
|
||||
}
|
||||
var r codeResponse
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := Unmarshal(codeJSON, &r); err != nil {
|
||||
b.Fatal("Unmmarshal:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalString(b *testing.B) {
|
||||
data := []byte(`"hello, world"`)
|
||||
var s string
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := Unmarshal(data, &s); err != nil {
|
||||
b.Fatal("Unmarshal:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalFloat64(b *testing.B) {
|
||||
var f float64
|
||||
data := []byte(`3.14`)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := Unmarshal(data, &f); err != nil {
|
||||
b.Fatal("Unmarshal:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalInt64(b *testing.B) {
|
||||
var x int64
|
||||
data := []byte(`3`)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := Unmarshal(data, &x); err != nil {
|
||||
b.Fatal("Unmarshal:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIssue10335(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
var s struct{}
|
||||
j := []byte(`{"a":{ }}`)
|
||||
for n := 0; n < b.N; n++ {
|
||||
if err := Unmarshal(j, &s); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
vendor/github.com/ajeddeloh/go-json/decode.go (1226 lines, generated, vendored, new file)

File diff suppressed because it is too large.
vendor/github.com/ajeddeloh/go-json/decode_test.go (1453 lines, generated, vendored, new file)

File diff suppressed because it is too large.
vendor/github.com/ajeddeloh/go-json/encode.go (1194 lines, generated, vendored, new file)

File diff suppressed because it is too large.
vendor/github.com/ajeddeloh/go-json/encode_test.go (532 lines, generated, vendored, new file)

@@ -0,0 +1,532 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type Optionals struct {
|
||||
Sr string `json:"sr"`
|
||||
So string `json:"so,omitempty"`
|
||||
Sw string `json:"-"`
|
||||
|
||||
Ir int `json:"omitempty"` // actually named omitempty, not an option
|
||||
Io int `json:"io,omitempty"`
|
||||
|
||||
Slr []string `json:"slr,random"`
|
||||
Slo []string `json:"slo,omitempty"`
|
||||
|
||||
Mr map[string]interface{} `json:"mr"`
|
||||
Mo map[string]interface{} `json:",omitempty"`
|
||||
|
||||
Fr float64 `json:"fr"`
|
||||
Fo float64 `json:"fo,omitempty"`
|
||||
|
||||
Br bool `json:"br"`
|
||||
Bo bool `json:"bo,omitempty"`
|
||||
|
||||
Ur uint `json:"ur"`
|
||||
Uo uint `json:"uo,omitempty"`
|
||||
|
||||
Str struct{} `json:"str"`
|
||||
Sto struct{} `json:"sto,omitempty"`
|
||||
}
|
||||
|
||||
var optionalsExpected = `{
|
||||
"sr": "",
|
||||
"omitempty": 0,
|
||||
"slr": null,
|
||||
"mr": {},
|
||||
"fr": 0,
|
||||
"br": false,
|
||||
"ur": 0,
|
||||
"str": {},
|
||||
"sto": {}
|
||||
}`
|
||||
|
||||
func TestOmitEmpty(t *testing.T) {
|
||||
var o Optionals
|
||||
o.Sw = "something"
|
||||
o.Mr = map[string]interface{}{}
|
||||
o.Mo = map[string]interface{}{}
|
||||
|
||||
got, err := MarshalIndent(&o, "", " ")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := string(got); got != optionalsExpected {
|
||||
t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
|
||||
}
|
||||
}
|
||||
|
||||
type StringTag struct {
|
||||
BoolStr bool `json:",string"`
|
||||
IntStr int64 `json:",string"`
|
||||
StrStr string `json:",string"`
|
||||
}
|
||||
|
||||
var stringTagExpected = `{
|
||||
"BoolStr": "true",
|
||||
"IntStr": "42",
|
||||
"StrStr": "\"xzbit\""
|
||||
}`
|
||||
|
||||
func TestStringTag(t *testing.T) {
|
||||
var s StringTag
|
||||
s.BoolStr = true
|
||||
s.IntStr = 42
|
||||
s.StrStr = "xzbit"
|
||||
got, err := MarshalIndent(&s, "", " ")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := string(got); got != stringTagExpected {
|
||||
t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected)
|
||||
}
|
||||
|
||||
// Verify that it round-trips.
|
||||
var s2 StringTag
|
||||
err = NewDecoder(bytes.NewReader(got)).Decode(&s2)
|
||||
if err != nil {
|
||||
t.Fatalf("Decode: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, s2) {
|
||||
t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2)
|
||||
}
|
||||
}
|
||||
|
||||
// byte slices are special even if they're renamed types.
|
||||
type renamedByte byte
|
||||
type renamedByteSlice []byte
|
||||
type renamedRenamedByteSlice []renamedByte
|
||||
|
||||
func TestEncodeRenamedByteSlice(t *testing.T) {
|
||||
s := renamedByteSlice("abc")
|
||||
result, err := Marshal(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expect := `"YWJj"`
|
||||
if string(result) != expect {
|
||||
t.Errorf(" got %s want %s", result, expect)
|
||||
}
|
||||
r := renamedRenamedByteSlice("abc")
|
||||
result, err = Marshal(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(result) != expect {
|
||||
t.Errorf(" got %s want %s", result, expect)
|
||||
}
|
||||
}
|
||||
|
||||
var unsupportedValues = []interface{}{
|
||||
math.NaN(),
|
||||
math.Inf(-1),
|
||||
math.Inf(1),
|
||||
}
|
||||
|
||||
func TestUnsupportedValues(t *testing.T) {
|
||||
for _, v := range unsupportedValues {
|
||||
if _, err := Marshal(v); err != nil {
|
||||
if _, ok := err.(*UnsupportedValueError); !ok {
|
||||
t.Errorf("for %v, got %T want UnsupportedValueError", v, err)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("for %v, expected error", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ref has Marshaler and Unmarshaler methods with pointer receiver.
|
||||
type Ref int
|
||||
|
||||
func (*Ref) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"ref"`), nil
|
||||
}
|
||||
|
||||
func (r *Ref) UnmarshalJSON([]byte) error {
|
||||
*r = 12
|
||||
return nil
|
||||
}
|
||||
|
||||
// Val has Marshaler methods with value receiver.
|
||||
type Val int
|
||||
|
||||
func (Val) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"val"`), nil
|
||||
}
|
||||
|
||||
// RefText has Marshaler and Unmarshaler methods with pointer receiver.
|
||||
type RefText int
|
||||
|
||||
func (*RefText) MarshalText() ([]byte, error) {
|
||||
return []byte(`"ref"`), nil
|
||||
}
|
||||
|
||||
func (r *RefText) UnmarshalText([]byte) error {
|
||||
*r = 13
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValText has Marshaler methods with value receiver.
|
||||
type ValText int
|
||||
|
||||
func (ValText) MarshalText() ([]byte, error) {
|
||||
return []byte(`"val"`), nil
|
||||
}
|
||||
|
||||
func TestRefValMarshal(t *testing.T) {
|
||||
var s = struct {
|
||||
R0 Ref
|
||||
R1 *Ref
|
||||
R2 RefText
|
||||
R3 *RefText
|
||||
V0 Val
|
||||
V1 *Val
|
||||
V2 ValText
|
||||
V3 *ValText
|
||||
}{
|
||||
R0: 12,
|
||||
R1: new(Ref),
|
||||
R2: 14,
|
||||
R3: new(RefText),
|
||||
V0: 13,
|
||||
V1: new(Val),
|
||||
V2: 15,
|
||||
V3: new(ValText),
|
||||
}
|
||||
const want = `{"R0":"ref","R1":"ref","R2":"\"ref\"","R3":"\"ref\"","V0":"val","V1":"val","V2":"\"val\"","V3":"\"val\""}`
|
||||
b, err := Marshal(&s)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
if got := string(b); got != want {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// C implements Marshaler and returns unescaped JSON.
|
||||
type C int
|
||||
|
||||
func (C) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"<&>"`), nil
|
||||
}
|
||||
|
||||
// CText implements Marshaler and returns unescaped text.
|
||||
type CText int
|
||||
|
||||
func (CText) MarshalText() ([]byte, error) {
|
||||
return []byte(`"<&>"`), nil
|
||||
}
|
||||
|
||||
func TestMarshalerEscaping(t *testing.T) {
|
||||
var c C
|
||||
want := `"\u003c\u0026\u003e"`
|
||||
b, err := Marshal(c)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal(c): %v", err)
|
||||
}
|
||||
if got := string(b); got != want {
|
||||
t.Errorf("Marshal(c) = %#q, want %#q", got, want)
|
||||
}
|
||||
|
||||
var ct CText
|
||||
want = `"\"\u003c\u0026\u003e\""`
|
||||
b, err = Marshal(ct)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal(ct): %v", err)
|
||||
}
|
||||
if got := string(b); got != want {
|
||||
t.Errorf("Marshal(ct) = %#q, want %#q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
type IntType int
|
||||
|
||||
type MyStruct struct {
|
||||
IntType
|
||||
}
|
||||
|
||||
func TestAnonymousNonstruct(t *testing.T) {
|
||||
var i IntType = 11
|
||||
a := MyStruct{i}
|
||||
const want = `{"IntType":11}`
|
||||
|
||||
b, err := Marshal(a)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
if got := string(b); got != want {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
type BugA struct {
|
||||
S string
|
||||
}
|
||||
|
||||
type BugB struct {
|
||||
BugA
|
||||
S string
|
||||
}
|
||||
|
||||
type BugC struct {
|
||||
S string
|
||||
}
|
||||
|
||||
// Legal Go: We never use the repeated embedded field (S).
|
||||
type BugX struct {
|
||||
A int
|
||||
BugA
|
||||
BugB
|
||||
}
|
||||
|
||||
// Issue 5245.
|
||||
func TestEmbeddedBug(t *testing.T) {
|
||||
v := BugB{
|
||||
BugA{"A"},
|
||||
"B",
|
||||
}
|
||||
b, err := Marshal(v)
|
||||
if err != nil {
|
||||
t.Fatal("Marshal:", err)
|
||||
}
|
||||
want := `{"S":"B"}`
|
||||
got := string(b)
|
||||
if got != want {
|
||||
t.Fatalf("Marshal: got %s want %s", got, want)
|
||||
}
|
||||
// Now check that the duplicate field, S, does not appear.
|
||||
x := BugX{
|
||||
A: 23,
|
||||
}
|
||||
b, err = Marshal(x)
|
||||
if err != nil {
|
||||
t.Fatal("Marshal:", err)
|
||||
}
|
||||
want = `{"A":23}`
|
||||
got = string(b)
|
||||
if got != want {
|
||||
t.Fatalf("Marshal: got %s want %s", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
type BugD struct { // Same as BugA after tagging.
|
||||
XXX string `json:"S"`
|
||||
}
|
||||
|
||||
// BugD's tagged S field should dominate BugA's.
|
||||
type BugY struct {
|
||||
BugA
|
||||
BugD
|
||||
}
|
||||
|
||||
// Test that a field with a tag dominates untagged fields.
|
||||
func TestTaggedFieldDominates(t *testing.T) {
|
||||
v := BugY{
|
||||
BugA{"BugA"},
|
||||
BugD{"BugD"},
|
||||
}
|
||||
b, err := Marshal(v)
|
||||
if err != nil {
|
||||
t.Fatal("Marshal:", err)
|
||||
}
|
||||
want := `{"S":"BugD"}`
|
||||
got := string(b)
|
||||
if got != want {
|
||||
t.Fatalf("Marshal: got %s want %s", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// There are no tags here, so S should not appear.
|
||||
type BugZ struct {
|
||||
BugA
|
||||
BugC
|
||||
BugY // Contains a tagged S field through BugD; should not dominate.
|
||||
}
|
||||
|
||||
func TestDuplicatedFieldDisappears(t *testing.T) {
|
||||
v := BugZ{
|
||||
BugA{"BugA"},
|
||||
BugC{"BugC"},
|
||||
BugY{
|
||||
BugA{"nested BugA"},
|
||||
BugD{"nested BugD"},
|
||||
},
|
||||
}
|
||||
b, err := Marshal(v)
|
||||
if err != nil {
|
||||
t.Fatal("Marshal:", err)
|
||||
}
|
||||
want := `{}`
|
||||
got := string(b)
|
||||
if got != want {
|
||||
t.Fatalf("Marshal: got %s want %s", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringBytes(t *testing.T) {
|
||||
// Test that encodeState.stringBytes and encodeState.string use the same encoding.
|
||||
es := &encodeState{}
|
||||
var r []rune
|
||||
for i := '\u0000'; i <= unicode.MaxRune; i++ {
|
||||
r = append(r, i)
|
||||
}
|
||||
s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too
|
||||
_, err := es.string(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
esBytes := &encodeState{}
|
||||
_, err = esBytes.stringBytes([]byte(s))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
enc := es.Buffer.String()
|
||||
encBytes := esBytes.Buffer.String()
|
||||
if enc != encBytes {
|
||||
i := 0
|
||||
for i < len(enc) && i < len(encBytes) && enc[i] == encBytes[i] {
|
||||
i++
|
||||
}
|
||||
enc = enc[i:]
|
||||
encBytes = encBytes[i:]
|
||||
i = 0
|
||||
for i < len(enc) && i < len(encBytes) && enc[len(enc)-i-1] == encBytes[len(encBytes)-i-1] {
|
||||
i++
|
||||
}
|
||||
enc = enc[:len(enc)-i]
|
||||
encBytes = encBytes[:len(encBytes)-i]
|
||||
|
||||
if len(enc) > 20 {
|
||||
enc = enc[:20] + "..."
|
||||
}
|
||||
if len(encBytes) > 20 {
|
||||
encBytes = encBytes[:20] + "..."
|
||||
}
|
||||
|
||||
t.Errorf("encodings differ at %#q vs %#q", enc, encBytes)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue6458(t *testing.T) {
|
||||
type Foo struct {
|
||||
M RawMessage
|
||||
}
|
||||
x := Foo{RawMessage(`"foo"`)}
|
||||
|
||||
b, err := Marshal(&x)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if want := `{"M":"foo"}`; string(b) != want {
|
||||
t.Errorf("Marshal(&x) = %#q; want %#q", b, want)
|
||||
}
|
||||
|
||||
b, err = Marshal(x)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want := `{"M":"ImZvbyI="}`; string(b) != want {
|
||||
t.Errorf("Marshal(x) = %#q; want %#q", b, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTMLEscape(t *testing.T) {
|
||||
var b, want bytes.Buffer
|
||||
m := `{"M":"<html>foo &` + "\xe2\x80\xa8 \xe2\x80\xa9" + `</html>"}`
|
||||
want.Write([]byte(`{"M":"\u003chtml\u003efoo \u0026\u2028 \u2029\u003c/html\u003e"}`))
|
||||
HTMLEscape(&b, []byte(m))
|
||||
if !bytes.Equal(b.Bytes(), want.Bytes()) {
|
||||
t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
// golang.org/issue/8582
|
||||
func TestEncodePointerString(t *testing.T) {
|
||||
type stringPointer struct {
|
||||
N *int64 `json:"n,string"`
|
||||
}
|
||||
var n int64 = 42
|
||||
b, err := Marshal(stringPointer{N: &n})
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
if got, want := string(b), `{"n":"42"}`; got != want {
|
||||
t.Errorf("Marshal = %s, want %s", got, want)
|
||||
}
|
||||
var back stringPointer
|
||||
err = Unmarshal(b, &back)
|
||||
if err != nil {
|
||||
t.Fatalf("Unmarshal: %v", err)
|
||||
}
|
||||
if back.N == nil {
|
||||
t.Fatalf("Unmarshalled nil N field")
|
||||
}
|
||||
if *back.N != 42 {
|
||||
t.Fatalf("*N = %d; want 42", *back.N)
|
||||
}
|
||||
}
|
||||
|
||||
var encodeStringTests = []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"\x00", `"\u0000"`},
|
||||
{"\x01", `"\u0001"`},
|
||||
{"\x02", `"\u0002"`},
|
||||
{"\x03", `"\u0003"`},
|
||||
{"\x04", `"\u0004"`},
|
||||
{"\x05", `"\u0005"`},
|
||||
{"\x06", `"\u0006"`},
|
||||
{"\x07", `"\u0007"`},
|
||||
{"\x08", `"\u0008"`},
|
||||
{"\x09", `"\t"`},
|
||||
{"\x0a", `"\n"`},
|
||||
{"\x0b", `"\u000b"`},
|
||||
{"\x0c", `"\u000c"`},
|
||||
{"\x0d", `"\r"`},
|
||||
{"\x0e", `"\u000e"`},
|
||||
{"\x0f", `"\u000f"`},
|
||||
{"\x10", `"\u0010"`},
|
||||
{"\x11", `"\u0011"`},
|
||||
{"\x12", `"\u0012"`},
|
||||
{"\x13", `"\u0013"`},
|
||||
{"\x14", `"\u0014"`},
|
||||
{"\x15", `"\u0015"`},
|
||||
{"\x16", `"\u0016"`},
|
||||
{"\x17", `"\u0017"`},
|
||||
{"\x18", `"\u0018"`},
|
||||
{"\x19", `"\u0019"`},
|
||||
{"\x1a", `"\u001a"`},
|
||||
{"\x1b", `"\u001b"`},
|
||||
{"\x1c", `"\u001c"`},
|
||||
{"\x1d", `"\u001d"`},
|
||||
{"\x1e", `"\u001e"`},
|
||||
{"\x1f", `"\u001f"`},
|
||||
}
|
||||
|
||||
func TestEncodeString(t *testing.T) {
|
||||
for _, tt := range encodeStringTests {
|
||||
b, err := Marshal(tt.in)
|
||||
if err != nil {
|
||||
t.Errorf("Marshal(%q): %v", tt.in, err)
|
||||
continue
|
||||
}
|
||||
out := string(b)
|
||||
if out != tt.out {
|
||||
t.Errorf("Marshal(%q) = %#q, want %#q", tt.in, out, tt.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
vendor/github.com/ajeddeloh/go-json/example_test.go (252 lines, generated, vendored, new file)

@@ -0,0 +1,252 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ExampleMarshal() {
|
||||
type ColorGroup struct {
|
||||
ID int
|
||||
Name string
|
||||
Colors []string
|
||||
}
|
||||
group := ColorGroup{
|
||||
ID: 1,
|
||||
Name: "Reds",
|
||||
Colors: []string{"Crimson", "Red", "Ruby", "Maroon"},
|
||||
}
|
||||
b, err := json.Marshal(group)
|
||||
if err != nil {
|
||||
fmt.Println("error:", err)
|
||||
}
|
||||
os.Stdout.Write(b)
|
||||
// Output:
|
||||
// {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]}
|
||||
}
|
||||
|
||||
func ExampleUnmarshal() {
|
||||
var jsonBlob = []byte(`[
|
||||
{"Name": "Platypus", "Order": "Monotremata"},
|
||||
{"Name": "Quoll", "Order": "Dasyuromorphia"}
|
||||
]`)
|
||||
type Animal struct {
|
||||
Name string
|
||||
Order string
|
||||
}
|
||||
var animals []Animal
|
||||
err := json.Unmarshal(jsonBlob, &animals)
|
||||
if err != nil {
|
||||
fmt.Println("error:", err)
|
||||
}
|
||||
fmt.Printf("%+v", animals)
|
||||
// Output:
|
||||
// [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}]
|
||||
}
|
||||
|
||||
// This example uses a Decoder to decode a stream of distinct JSON values.
|
||||
func ExampleDecoder() {
|
||||
const jsonStream = `
|
||||
{"Name": "Ed", "Text": "Knock knock."}
|
||||
{"Name": "Sam", "Text": "Who's there?"}
|
||||
{"Name": "Ed", "Text": "Go fmt."}
|
||||
{"Name": "Sam", "Text": "Go fmt who?"}
|
||||
{"Name": "Ed", "Text": "Go fmt yourself!"}
|
||||
`
|
||||
type Message struct {
|
||||
Name, Text string
|
||||
}
|
||||
dec := json.NewDecoder(strings.NewReader(jsonStream))
|
||||
for {
|
||||
var m Message
|
||||
if err := dec.Decode(&m); err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%s: %s\n", m.Name, m.Text)
|
||||
}
|
||||
// Output:
|
||||
// Ed: Knock knock.
|
||||
// Sam: Who's there?
|
||||
// Ed: Go fmt.
|
||||
// Sam: Go fmt who?
|
||||
// Ed: Go fmt yourself!
|
||||
}
|
||||
|
||||
// This example uses a Decoder to decode a stream of distinct JSON values.
|
||||
func ExampleDecoder_Token() {
|
||||
const jsonStream = `
|
||||
{"Message": "Hello", "Array": [1, 2, 3], "Null": null, "Number": 1.234}
|
||||
`
|
||||
dec := json.NewDecoder(strings.NewReader(jsonStream))
|
||||
for {
|
||||
t, err := dec.Token()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%T: %v", t, t)
|
||||
if dec.More() {
|
||||
fmt.Printf(" (more)")
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
// Output:
|
||||
// json.Delim: { (more)
|
||||
// string: Message (more)
|
||||
// string: Hello (more)
|
||||
// string: Array (more)
|
||||
// json.Delim: [ (more)
|
||||
// float64: 1 (more)
|
||||
// float64: 2 (more)
|
||||
// float64: 3
|
||||
// json.Delim: ] (more)
|
||||
// string: Null (more)
|
||||
// <nil>: <nil> (more)
|
||||
// string: Number (more)
|
||||
// float64: 1.234
|
||||
// json.Delim: }
|
||||
}
|
||||
|
||||
// This example uses a Decoder to decode a streaming array of JSON objects.
|
||||
func ExampleDecoder_Decode_stream() {
|
||||
const jsonStream = `
|
||||
[
|
||||
{"Name": "Ed", "Text": "Knock knock."},
|
||||
{"Name": "Sam", "Text": "Who's there?"},
|
||||
{"Name": "Ed", "Text": "Go fmt."},
|
||||
{"Name": "Sam", "Text": "Go fmt who?"},
|
||||
{"Name": "Ed", "Text": "Go fmt yourself!"}
|
||||
]
|
||||
`
|
||||
type Message struct {
|
||||
Name, Text string
|
||||
}
|
||||
dec := json.NewDecoder(strings.NewReader(jsonStream))
|
||||
|
||||
// read open bracket
|
||||
t, err := dec.Token()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%T: %v\n", t, t)
|
||||
|
||||
var m Message
|
||||
// while the array contains values
|
||||
for dec.More() {
|
||||
|
||||
// decode an array value (Message)
|
||||
err := dec.Decode(&m)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%v: %v\n", m.Name, m.Text)
|
||||
}
|
||||
|
||||
// read closing bracket
|
||||
t, err = dec.Token()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%T: %v\n", t, t)
|
||||
|
||||
// Output:
|
||||
// json.Delim: [
|
||||
// Ed: Knock knock.
|
||||
// Sam: Who's there?
|
||||
// Ed: Go fmt.
|
||||
// Sam: Go fmt who?
|
||||
// Ed: Go fmt yourself!
|
||||
// json.Delim: ]
|
||||
|
||||
}
|
||||
|
||||
// This example uses RawMessage to delay parsing part of a JSON message.
|
||||
func ExampleRawMessage() {
|
||||
type Color struct {
|
||||
Space string
|
||||
Point json.RawMessage // delay parsing until we know the color space
|
||||
}
|
||||
type RGB struct {
|
||||
R uint8
|
||||
G uint8
|
||||
B uint8
|
||||
}
|
||||
type YCbCr struct {
|
||||
Y uint8
|
||||
Cb int8
|
||||
Cr int8
|
||||
}
|
||||
|
||||
var j = []byte(`[
|
||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
|
||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
|
||||
]`)
|
||||
var colors []Color
|
||||
err := json.Unmarshal(j, &colors)
|
||||
if err != nil {
|
||||
log.Fatalln("error:", err)
|
||||
}
|
||||
|
||||
for _, c := range colors {
|
||||
var dst interface{}
|
||||
switch c.Space {
|
||||
case "RGB":
|
||||
dst = new(RGB)
|
||||
case "YCbCr":
|
||||
dst = new(YCbCr)
|
||||
}
|
||||
err := json.Unmarshal(c.Point, dst)
|
||||
if err != nil {
|
||||
log.Fatalln("error:", err)
|
||||
}
|
||||
fmt.Println(c.Space, dst)
|
||||
}
|
||||
// Output:
|
||||
// YCbCr &{255 0 -10}
|
||||
// RGB &{98 218 255}
|
||||
}
|
||||
|
||||
func ExampleIndent() {
|
||||
type Road struct {
|
||||
Name string
|
||||
Number int
|
||||
}
|
||||
roads := []Road{
|
||||
{"Diamond Fork", 29},
|
||||
{"Sheep Creek", 51},
|
||||
}
|
||||
|
||||
b, err := json.Marshal(roads)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
json.Indent(&out, b, "=", "\t")
|
||||
out.WriteTo(os.Stdout)
|
||||
// Output:
|
||||
// [
|
||||
// = {
|
||||
// = "Name": "Diamond Fork",
|
||||
// = "Number": 29
|
||||
// = },
|
||||
// = {
|
||||
// = "Name": "Sheep Creek",
|
||||
// = "Number": 51
|
||||
// = }
|
||||
// =]
|
||||
}
|
||||
vendor/github.com/ajeddeloh/go-json/fold.go (143 lines, generated, vendored, new file)

@@ -0,0 +1,143 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
|
||||
kelvin = '\u212a'
|
||||
smallLongEss = '\u017f'
|
||||
)
|
||||
|
||||
// foldFunc returns one of four different case folding equivalence
|
||||
// functions, from most general (and slow) to fastest:
|
||||
//
|
||||
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
|
||||
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
|
||||
// 3) asciiEqualFold, no special, but includes non-letters (including _)
|
||||
// 4) simpleLetterEqualFold, no specials, no non-letters.
|
||||
//
|
||||
// The letters S and K are special because they map to 3 runes, not just 2:
|
||||
// * S maps to s and to U+017F 'ſ' Latin small letter long s
|
||||
// * k maps to K and to U+212A 'K' Kelvin sign
|
||||
// See https://play.golang.org/p/tTxjOc0OGo
|
||||
//
|
||||
// The returned function is specialized for matching against s and
|
||||
// should only be given s. It's not curried for performance reasons.
|
||||
func foldFunc(s []byte) func(s, t []byte) bool {
|
||||
nonLetter := false
|
||||
special := false // special letter
|
||||
for _, b := range s {
|
||||
if b >= utf8.RuneSelf {
|
||||
return bytes.EqualFold
|
||||
}
|
||||
upper := b & caseMask
|
||||
if upper < 'A' || upper > 'Z' {
|
||||
nonLetter = true
|
||||
} else if upper == 'K' || upper == 'S' {
|
||||
// See above for why these letters are special.
|
||||
special = true
|
||||
}
|
||||
}
|
||||
if special {
|
||||
return equalFoldRight
|
||||
}
|
||||
if nonLetter {
|
||||
return asciiEqualFold
|
||||
}
|
||||
return simpleLetterEqualFold
|
||||
}
|
||||
|
||||
// equalFoldRight is a specialization of bytes.EqualFold when s is
|
||||
// known to be all ASCII (including punctuation), but contains an 's',
|
||||
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
|
||||
// See comments on foldFunc.
|
||||
func equalFoldRight(s, t []byte) bool {
|
||||
for _, sb := range s {
|
||||
if len(t) == 0 {
|
||||
return false
|
||||
}
|
||||
tb := t[0]
|
||||
if tb < utf8.RuneSelf {
|
||||
if sb != tb {
|
||||
sbUpper := sb & caseMask
|
||||
if 'A' <= sbUpper && sbUpper <= 'Z' {
|
||||
if sbUpper != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
t = t[1:]
|
||||
continue
|
||||
}
|
||||
// sb is ASCII and t is not. t must be either kelvin
|
||||
// sign or long s; sb must be s, S, k, or K.
|
||||
tr, size := utf8.DecodeRune(t)
|
||||
switch sb {
|
||||
case 's', 'S':
|
||||
if tr != smallLongEss {
|
||||
return false
|
||||
}
|
||||
case 'k', 'K':
|
||||
if tr != kelvin {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
t = t[size:]
|
||||
|
||||
}
|
||||
if len(t) > 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// asciiEqualFold is a specialization of bytes.EqualFold for use when
|
||||
// s is all ASCII (but may contain non-letters) and contains no
|
||||
// special-folding letters.
|
||||
// See comments on foldFunc.
|
||||
func asciiEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, sb := range s {
|
||||
tb := t[i]
|
||||
if sb == tb {
|
||||
continue
|
||||
}
|
||||
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
|
||||
if sb&caseMask != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
|
||||
// use when s is all ASCII letters (no underscores, etc) and also
|
||||
// doesn't contain 'k', 'K', 's', or 'S'.
|
||||
// See comments on foldFunc.
|
||||
func simpleLetterEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, b := range s {
|
||||
if b&caseMask != t[i]&caseMask {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
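The special-casing of 'k'/'K' and 's'/'S' in foldFunc above comes from Unicode simple case folding, where U+212A (KELVIN SIGN) folds to 'k' and U+017F (LATIN SMALL LETTER LONG S) folds to 's'. A small standalone check with the standard library (not part of the vendored code) illustrates why those letters need the slower Unicode-aware path:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// U+212A KELVIN SIGN case-folds to 'k', and U+017F LATIN SMALL LETTER
	// LONG S folds to 's'; any key containing k/K or s/S therefore needs a
	// Unicode-aware comparison (equalFoldRight) rather than a pure ASCII one.
	fmt.Println(strings.EqualFold("kelvin", "\u212aelvin")) // true
	fmt.Println(strings.EqualFold("set", "\u017fet"))       // true

	// Letters outside that set only ever fold within ASCII, so a masked
	// byte comparison (asciiEqualFold / simpleLetterEqualFold) suffices.
	fmt.Println(strings.EqualFold("ab", "AB")) // true
}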
vendor/github.com/ajeddeloh/go-json/fold_test.go (116 lines, generated, vendored, new file)

@@ -0,0 +1,116 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var foldTests = []struct {
|
||||
fn func(s, t []byte) bool
|
||||
s, t string
|
||||
want bool
|
||||
}{
|
||||
{equalFoldRight, "", "", true},
|
||||
{equalFoldRight, "a", "a", true},
|
||||
{equalFoldRight, "", "a", false},
|
||||
{equalFoldRight, "a", "", false},
|
||||
{equalFoldRight, "a", "A", true},
|
||||
{equalFoldRight, "AB", "ab", true},
|
||||
{equalFoldRight, "AB", "ac", false},
|
||||
{equalFoldRight, "sbkKc", "ſbKKc", true},
|
||||
{equalFoldRight, "SbKkc", "ſbKKc", true},
|
||||
{equalFoldRight, "SbKkc", "ſbKK", false},
|
||||
{equalFoldRight, "e", "é", false},
|
||||
{equalFoldRight, "s", "S", true},
|
||||
|
||||
{simpleLetterEqualFold, "", "", true},
|
||||
{simpleLetterEqualFold, "abc", "abc", true},
|
||||
{simpleLetterEqualFold, "abc", "ABC", true},
|
||||
{simpleLetterEqualFold, "abc", "ABCD", false},
|
||||
{simpleLetterEqualFold, "abc", "xxx", false},
|
||||
|
||||
{asciiEqualFold, "a_B", "A_b", true},
|
||||
{asciiEqualFold, "aa@", "aa`", false}, // verify 0x40 and 0x60 aren't case-equivalent
|
||||
}
|
||||
|
||||
func TestFold(t *testing.T) {
|
||||
for i, tt := range foldTests {
|
||||
if got := tt.fn([]byte(tt.s), []byte(tt.t)); got != tt.want {
|
||||
t.Errorf("%d. %q, %q = %v; want %v", i, tt.s, tt.t, got, tt.want)
|
||||
}
|
||||
truth := strings.EqualFold(tt.s, tt.t)
|
||||
if truth != tt.want {
|
||||
t.Errorf("strings.EqualFold doesn't agree with case %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFoldAgainstUnicode(t *testing.T) {
|
||||
const bufSize = 5
|
||||
buf1 := make([]byte, 0, bufSize)
|
||||
buf2 := make([]byte, 0, bufSize)
|
||||
var runes []rune
|
||||
for i := 0x20; i <= 0x7f; i++ {
|
||||
runes = append(runes, rune(i))
|
||||
}
|
||||
runes = append(runes, kelvin, smallLongEss)
|
||||
|
||||
funcs := []struct {
|
||||
name string
|
||||
fold func(s, t []byte) bool
|
||||
letter bool // must be ASCII letter
|
||||
simple bool // must be simple ASCII letter (not 'S' or 'K')
|
||||
}{
|
||||
{
|
||||
name: "equalFoldRight",
|
||||
fold: equalFoldRight,
|
||||
},
|
||||
{
|
||||
name: "asciiEqualFold",
|
||||
fold: asciiEqualFold,
|
||||
simple: true,
|
||||
},
|
||||
{
|
||||
name: "simpleLetterEqualFold",
|
||||
fold: simpleLetterEqualFold,
|
||||
simple: true,
|
||||
letter: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ff := range funcs {
|
||||
for _, r := range runes {
|
||||
if r >= utf8.RuneSelf {
|
||||
continue
|
||||
}
|
||||
if ff.letter && !isASCIILetter(byte(r)) {
|
||||
continue
|
||||
}
|
||||
if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') {
|
||||
continue
|
||||
}
|
||||
for _, r2 := range runes {
|
||||
buf1 := append(buf1[:0], 'x')
|
||||
buf2 := append(buf2[:0], 'x')
|
||||
buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)]
|
||||
buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)]
|
||||
buf1 = append(buf1, 'x')
|
||||
buf2 = append(buf2, 'x')
|
||||
want := bytes.EqualFold(buf1, buf2)
|
||||
if got := ff.fold(buf1, buf2); got != want {
|
||||
t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isASCIILetter(b byte) bool {
|
||||
return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z')
|
||||
}
|
||||
vendor/github.com/ajeddeloh/go-json/indent.go (137 lines, generated, vendored, new file)

@@ -0,0 +1,137 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Compact appends to dst the JSON-encoded src with
|
||||
// insignificant space characters elided.
|
||||
func Compact(dst *bytes.Buffer, src []byte) error {
|
||||
return compact(dst, src, false)
|
||||
}
|
||||
|
||||
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
|
||||
origLen := dst.Len()
|
||||
var scan scanner
|
||||
scan.reset()
|
||||
start := 0
|
||||
for i, c := range src {
|
||||
if escape && (c == '<' || c == '>' || c == '&') {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u00`)
|
||||
dst.WriteByte(hex[c>>4])
|
||||
dst.WriteByte(hex[c&0xF])
|
||||
start = i + 1
|
||||
}
|
||||
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
|
||||
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u202`)
|
||||
dst.WriteByte(hex[src[i+2]&0xF])
|
||||
start = i + 3
|
||||
}
|
||||
v := scan.step(&scan, int(c))
|
||||
if v >= scanSkipSpace {
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
if start < len(src) {
|
||||
dst.Write(src[start:])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
|
||||
dst.WriteByte('\n')
|
||||
dst.WriteString(prefix)
|
||||
for i := 0; i < depth; i++ {
|
||||
dst.WriteString(indent)
|
||||
}
|
||||
}
|
||||
|
||||
// Indent appends to dst an indented form of the JSON-encoded src.
|
||||
// Each element in a JSON object or array begins on a new,
|
||||
// indented line beginning with prefix followed by one or more
|
||||
// copies of indent according to the indentation nesting.
|
||||
// The data appended to dst does not begin with the prefix nor
|
||||
// any indentation, and has no trailing newline, to make it
|
||||
// easier to embed inside other formatted JSON data.
|
||||
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
|
||||
origLen := dst.Len()
|
||||
var scan scanner
|
||||
scan.reset()
|
||||
needIndent := false
|
||||
depth := 0
|
||||
for _, c := range src {
|
||||
scan.bytes++
|
||||
v := scan.step(&scan, int(c))
|
||||
if v == scanSkipSpace {
|
||||
continue
|
||||
}
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if needIndent && v != scanEndObject && v != scanEndArray {
|
||||
needIndent = false
|
||||
depth++
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
|
||||
// Emit semantically uninteresting bytes
|
||||
// (in particular, punctuation in strings) unmodified.
|
||||
if v == scanContinue {
|
||||
dst.WriteByte(c)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add spacing around real punctuation.
|
||||
switch c {
|
||||
case '{', '[':
|
||||
// delay indent so that empty object and array are formatted as {} and [].
|
||||
needIndent = true
|
||||
dst.WriteByte(c)
|
||||
|
||||
case ',':
|
||||
dst.WriteByte(c)
|
||||
newline(dst, prefix, indent, depth)
|
||||
|
||||
case ':':
|
||||
dst.WriteByte(c)
|
||||
dst.WriteByte(' ')
|
||||
|
||||
case '}', ']':
|
||||
if needIndent {
|
||||
// suppress indent in empty object/array
|
||||
needIndent = false
|
||||
} else {
|
||||
depth--
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
dst.WriteByte(c)
|
||||
|
||||
default:
|
||||
dst.WriteByte(c)
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
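Compact and Indent above are inverse operations: one strips insignificant whitespace, the other re-adds it. A short usage sketch, written against the standard encoding/json package, which has the same Compact and Indent signatures as this vendored fork:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	src := []byte("{\n  \"name\": \"matchbox\",\n  \"ports\": [69, 8080]\n}")

	// Compact drops insignificant whitespace between JSON tokens.
	var compacted bytes.Buffer
	if err := json.Compact(&compacted, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(compacted.String()) // {"name":"matchbox","ports":[69,8080]}

	// Indent re-formats the compact form with a prefix and indent string.
	var indented bytes.Buffer
	if err := json.Indent(&indented, compacted.Bytes(), "", "  "); err != nil {
		log.Fatal(err)
	}
	fmt.Println(indented.String())
}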
vendor/github.com/ajeddeloh/go-json/scanner.go (630 lines, generated, vendored, new file)

@@ -0,0 +1,630 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
// JSON value parser state machine.
|
||||
// Just about at the limit of what is reasonable to write by hand.
|
||||
// Some parts are a bit tedious, but overall it nicely factors out the
|
||||
// otherwise common code from the multiple scanning functions
|
||||
// in this package (Compact, Indent, checkValid, nextValue, etc).
|
||||
//
|
||||
// This file starts with two simple examples using the scanner
|
||||
// before diving into the scanner itself.
|
||||
|
||||
import "strconv"
|
||||
|
||||
// checkValid verifies that data is valid JSON-encoded data.
|
||||
// scan is passed in for use by checkValid to avoid an allocation.
|
||||
func checkValid(data []byte, scan *scanner) error {
|
||||
scan.reset()
|
||||
for _, c := range data {
|
||||
scan.bytes++
|
||||
if scan.step(scan, int(c)) == scanError {
|
||||
return scan.err
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextValue splits data after the next whole JSON value,
|
||||
// returning that value and the bytes that follow it as separate slices.
|
||||
// scan is passed in for use by nextValue to avoid an allocation.
|
||||
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
|
||||
scan.reset()
|
||||
for i, c := range data {
|
||||
v := scan.step(scan, int(c))
|
||||
if v >= scanEndObject {
|
||||
switch v {
|
||||
// probe the scanner with a space to determine whether we will
|
||||
// get scanEnd on the next character. Otherwise, if the next character
|
||||
// is not a space, scanEndTop allocates a needless error.
|
||||
case scanEndObject, scanEndArray:
|
||||
if scan.step(scan, ' ') == scanEnd {
|
||||
return data[:i+1], data[i+1:], nil
|
||||
}
|
||||
case scanError:
|
||||
return nil, nil, scan.err
|
||||
case scanEnd:
|
||||
return data[0:i], data[i:], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return nil, nil, scan.err
|
||||
}
|
||||
return data, nil, nil
|
||||
}
|
||||
|
||||
// A SyntaxError is a description of a JSON syntax error.
|
||||
type SyntaxError struct {
|
||||
msg string // description of error
|
||||
Offset int64 // error occurred after reading Offset bytes
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string { return e.msg }
|
||||
|
||||
// A scanner is a JSON scanning state machine.
|
||||
// Callers call scan.reset() and then pass bytes in one at a time
|
||||
// by calling scan.step(&scan, c) for each byte.
|
||||
// The return value, referred to as an opcode, tells the
|
||||
// caller about significant parsing events like beginning
|
||||
// and ending literals, objects, and arrays, so that the
|
||||
// caller can follow along if it wishes.
|
||||
// The return value scanEnd indicates that a single top-level
|
||||
// JSON value has been completed, *before* the byte that
|
||||
// just got passed in. (The indication must be delayed in order
|
||||
// to recognize the end of numbers: is 123 a whole value or
|
||||
// the beginning of 12345e+6?).
|
||||
type scanner struct {
|
||||
// The step is a func to be called to execute the next transition.
|
||||
// Also tried using an integer constant and a single func
|
||||
// with a switch, but using the func directly was 10% faster
|
||||
// on a 64-bit Mac Mini, and it's nicer to read.
|
||||
step func(*scanner, int) int
|
||||
|
||||
// Reached end of top-level value.
|
||||
endTop bool
|
||||
|
||||
// Stack of what we're in the middle of - array values, object keys, object values.
|
||||
parseState []int
|
||||
|
||||
// Error that happened, if any.
|
||||
err error
|
||||
|
||||
// 1-byte redo (see undo method)
|
||||
redo bool
|
||||
redoCode int
|
||||
redoState func(*scanner, int) int
|
||||
|
||||
// total bytes consumed, updated by decoder.Decode
|
||||
bytes int64
|
||||
}
|
||||
|
||||
// These values are returned by the state transition functions
|
||||
// assigned to scanner.state and the method scanner.eof.
|
||||
// They give details about the current state of the scan that
|
||||
// callers might be interested to know about.
|
||||
// It is okay to ignore the return value of any particular
|
||||
// call to scanner.state: if one call returns scanError,
|
||||
// every subsequent call will return scanError too.
|
||||
const (
|
||||
// Continue.
|
||||
scanContinue = iota // uninteresting byte
|
||||
scanBeginLiteral // end implied by next result != scanContinue
|
||||
scanBeginObject // begin object
|
||||
scanObjectKey // just finished object key (string)
|
||||
scanObjectValue // just finished non-last object value
|
||||
scanEndObject // end object (implies scanObjectValue if possible)
|
||||
scanBeginArray // begin array
|
||||
scanArrayValue // just finished array value
|
||||
scanEndArray // end array (implies scanArrayValue if possible)
|
||||
scanSkipSpace // space byte; can skip; known to be last "continue" result
|
||||
|
||||
// Stop.
|
||||
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
|
||||
scanError // hit an error, scanner.err.
|
||||
)
|
||||
|
||||
// These values are stored in the parseState stack.
|
||||
// They give the current state of a composite value
|
||||
// being scanned. If the parser is inside a nested value
|
||||
// the parseState describes the nested state, outermost at entry 0.
|
||||
const (
|
||||
parseObjectKey = iota // parsing object key (before colon)
|
||||
parseObjectValue // parsing object value (after colon)
|
||||
parseArrayValue // parsing array value
|
||||
)
|
||||
|
||||
// reset prepares the scanner for use.
|
||||
// It must be called before calling s.step.
|
||||
func (s *scanner) reset() {
|
||||
s.step = stateBeginValue
|
||||
s.parseState = s.parseState[0:0]
|
||||
s.err = nil
|
||||
s.redo = false
|
||||
s.endTop = false
|
||||
}
|
||||
|
||||
// eof tells the scanner that the end of input has been reached.
|
||||
// It returns a scan status just as s.step does.
|
||||
func (s *scanner) eof() int {
|
||||
if s.err != nil {
|
||||
return scanError
|
||||
}
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
s.step(s, ' ')
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
if s.err == nil {
|
||||
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
|
||||
}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// pushParseState pushes a new parse state p onto the parse stack.
|
||||
func (s *scanner) pushParseState(p int) {
|
||||
s.parseState = append(s.parseState, p)
|
||||
}
|
||||
|
||||
// popParseState pops a parse state (already obtained) off the stack
|
||||
// and updates s.step accordingly.
|
||||
func (s *scanner) popParseState() {
|
||||
n := len(s.parseState) - 1
|
||||
s.parseState = s.parseState[0:n]
|
||||
s.redo = false
|
||||
if n == 0 {
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
} else {
|
||||
s.step = stateEndValue
|
||||
}
|
||||
}
|
||||
|
||||
func isSpace(c rune) bool {
|
||||
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
|
||||
}
|
||||
|
||||
// stateBeginValueOrEmpty is the state after reading `[`.
|
||||
func stateBeginValueOrEmpty(s *scanner, c int) int {
|
||||
if c <= ' ' && isSpace(rune(c)) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == ']' {
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginValue(s, c)
|
||||
}
|
||||
|
||||
// stateBeginValue is the state at the beginning of the input.
|
||||
func stateBeginValue(s *scanner, c int) int {
|
||||
if c <= ' ' && isSpace(rune(c)) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
switch c {
|
||||
case '{':
|
||||
s.step = stateBeginStringOrEmpty
|
||||
s.pushParseState(parseObjectKey)
|
||||
return scanBeginObject
|
||||
case '[':
|
||||
s.step = stateBeginValueOrEmpty
|
||||
s.pushParseState(parseArrayValue)
|
||||
return scanBeginArray
|
||||
case '"':
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
case '-':
|
||||
s.step = stateNeg
|
||||
return scanBeginLiteral
|
||||
case '0': // beginning of 0.123
|
||||
s.step = state0
|
||||
return scanBeginLiteral
|
||||
case 't': // beginning of true
|
||||
s.step = stateT
|
||||
return scanBeginLiteral
|
||||
case 'f': // beginning of false
|
||||
s.step = stateF
|
||||
return scanBeginLiteral
|
||||
case 'n': // beginning of null
|
||||
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}

// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}

// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}

// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c int) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(rune(c)) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}

// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c int) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}

// stateInString is the state after reading `"`.
func stateInString(s *scanner, c int) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}

// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c int) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
}
if c == 'u' {
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}

// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}

// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c int) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}

// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}

// state0 is the state after reading `0` during a number.
func state0(s *scanner, c int) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}

// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}

// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}

// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c int) int {
if c == '+' {
s.step = stateESign
return scanContinue
}
if c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}

// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}

// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return stateEndValue(s, c)
}

// stateT is the state after reading `t`.
func stateT(s *scanner, c int) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}

// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c int) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}

// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c int) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}

// stateF is the state after reading `f`.
func stateF(s *scanner, c int) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}

// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c int) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}

// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c int) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}

// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c int) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}

// stateN is the state after reading `n`.
func stateN(s *scanner, c int) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}

// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c int) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}

// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c int) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}

// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c int) int {
return scanError
}

// error records an error and switches to the error state.
func (s *scanner) error(c int, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}

// quoteChar formats c as a quoted character literal
func quoteChar(c int) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}

// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}

// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}

// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c int) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}
316
vendor/github.com/ajeddeloh/go-json/scanner_test.go
generated
vendored
Normal file
316
vendor/github.com/ajeddeloh/go-json/scanner_test.go
generated
vendored
Normal file
@@ -0,0 +1,316 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
"bytes"
"math"
"math/rand"
"reflect"
"testing"
)

// Tests of simple examples.

type example struct {
compact string
indent string
}

var examples = []example{
{`1`, `1`},
{`{}`, `{}`},
{`[]`, `[]`},
{`{"":2}`, "{\n\t\"\": 2\n}"},
{`[3]`, "[\n\t3\n]"},
{`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
{`{"x":1}`, "{\n\t\"x\": 1\n}"},
{ex1, ex1i},
}

var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]`

var ex1i = `[
true,
false,
null,
"x",
1,
1.5,
0,
-5e+2
]`

func TestCompact(t *testing.T) {
var buf bytes.Buffer
for _, tt := range examples {
buf.Reset()
if err := Compact(&buf, []byte(tt.compact)); err != nil {
t.Errorf("Compact(%#q): %v", tt.compact, err)
} else if s := buf.String(); s != tt.compact {
t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s)
}

buf.Reset()
if err := Compact(&buf, []byte(tt.indent)); err != nil {
t.Errorf("Compact(%#q): %v", tt.indent, err)
continue
} else if s := buf.String(); s != tt.compact {
t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact)
}
}
}

func TestCompactSeparators(t *testing.T) {
// U+2028 and U+2029 should be escaped inside strings.
// They should not appear outside strings.
tests := []struct {
in, compact string
}{
{"{\"\u2028\": 1}", `{"\u2028":1}`},
{"{\"\u2029\" :2}", `{"\u2029":2}`},
}
for _, tt := range tests {
var buf bytes.Buffer
if err := Compact(&buf, []byte(tt.in)); err != nil {
t.Errorf("Compact(%q): %v", tt.in, err)
} else if s := buf.String(); s != tt.compact {
t.Errorf("Compact(%q) = %q, want %q", tt.in, s, tt.compact)
}
}
}

func TestIndent(t *testing.T) {
var buf bytes.Buffer
for _, tt := range examples {
buf.Reset()
if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
t.Errorf("Indent(%#q): %v", tt.indent, err)
} else if s := buf.String(); s != tt.indent {
t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s)
}

buf.Reset()
if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
t.Errorf("Indent(%#q): %v", tt.compact, err)
continue
} else if s := buf.String(); s != tt.indent {
t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent)
}
}
}

// Tests of a large random structure.

func TestCompactBig(t *testing.T) {
initBig()
var buf bytes.Buffer
if err := Compact(&buf, jsonBig); err != nil {
t.Fatalf("Compact: %v", err)
}
b := buf.Bytes()
if !bytes.Equal(b, jsonBig) {
t.Error("Compact(jsonBig) != jsonBig")
diff(t, b, jsonBig)
return
}
}

func TestIndentBig(t *testing.T) {
initBig()
var buf bytes.Buffer
if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
t.Fatalf("Indent1: %v", err)
}
b := buf.Bytes()
if len(b) == len(jsonBig) {
// jsonBig is compact (no unnecessary spaces);
// indenting should make it bigger
t.Fatalf("Indent(jsonBig) did not get bigger")
}

// should be idempotent
var buf1 bytes.Buffer
if err := Indent(&buf1, b, "", "\t"); err != nil {
t.Fatalf("Indent2: %v", err)
}
b1 := buf1.Bytes()
if !bytes.Equal(b1, b) {
t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)")
diff(t, b1, b)
return
}

// should get back to original
buf1.Reset()
if err := Compact(&buf1, b); err != nil {
t.Fatalf("Compact: %v", err)
}
b1 = buf1.Bytes()
if !bytes.Equal(b1, jsonBig) {
t.Error("Compact(Indent(jsonBig)) != jsonBig")
diff(t, b1, jsonBig)
return
}
}

type indentErrorTest struct {
in string
err error
}

var indentErrorTests = []indentErrorTest{
{`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
{`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
}

func TestIndentErrors(t *testing.T) {
for i, tt := range indentErrorTests {
slice := make([]uint8, 0)
buf := bytes.NewBuffer(slice)
if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
if !reflect.DeepEqual(err, tt.err) {
t.Errorf("#%d: Indent: %#v", i, err)
continue
}
}
}
}

func TestNextValueBig(t *testing.T) {
initBig()
var scan scanner
item, rest, err := nextValue(jsonBig, &scan)
if err != nil {
t.Fatalf("nextValue: %s", err)
}
if len(item) != len(jsonBig) || &item[0] != &jsonBig[0] {
t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
}
if len(rest) != 0 {
t.Errorf("invalid rest: %d", len(rest))
}

item, rest, err = nextValue(append(jsonBig, "HELLO WORLD"...), &scan)
if err != nil {
t.Fatalf("nextValue extra: %s", err)
}
if len(item) != len(jsonBig) {
t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
}
if string(rest) != "HELLO WORLD" {
t.Errorf("invalid rest: %d", len(rest))
}
}

var benchScan scanner

func BenchmarkSkipValue(b *testing.B) {
initBig()
b.ResetTimer()
for i := 0; i < b.N; i++ {
nextValue(jsonBig, &benchScan)
}
b.SetBytes(int64(len(jsonBig)))
}

func diff(t *testing.T, a, b []byte) {
for i := 0; ; i++ {
if i >= len(a) || i >= len(b) || a[i] != b[i] {
j := i - 10
if j < 0 {
j = 0
}
t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:]))
return
}
}
}

func trim(b []byte) []byte {
if len(b) > 20 {
return b[0:20]
}
return b
}

// Generate a random JSON object.

var jsonBig []byte

func initBig() {
n := 10000
if testing.Short() {
n = 100
}
b, err := Marshal(genValue(n))
if err != nil {
panic(err)
}
jsonBig = b
}

func genValue(n int) interface{} {
if n > 1 {
switch rand.Intn(2) {
case 0:
return genArray(n)
case 1:
return genMap(n)
}
}
switch rand.Intn(3) {
case 0:
return rand.Intn(2) == 0
case 1:
return rand.NormFloat64()
case 2:
return genString(30)
}
panic("unreachable")
}

func genString(stddev float64) string {
n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2))
c := make([]rune, n)
for i := range c {
f := math.Abs(rand.NormFloat64()*64 + 32)
if f > 0x10ffff {
f = 0x10ffff
}
c[i] = rune(f)
}
return string(c)
}

func genArray(n int) []interface{} {
f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
if f > n {
f = n
}
if f < 1 {
f = 1
}
x := make([]interface{}, f)
for i := range x {
x[i] = genValue(((i+1)*n)/f - (i*n)/f)
}
return x
}

func genMap(n int) map[string]interface{} {
f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
if f > n {
f = n
}
if n > 0 && f == 0 {
f = 1
}
x := make(map[string]interface{})
for i := 0; i < f; i++ {
x[genString(10)] = genValue(((i+1)*n)/f - (i*n)/f)
}
return x
}
480
vendor/github.com/ajeddeloh/go-json/stream.go
generated
vendored
Normal file
480
vendor/github.com/ajeddeloh/go-json/stream.go
generated
vendored
Normal file
@@ -0,0 +1,480 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
"bytes"
"errors"
"io"
)

// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scan scanner
err error

tokenState int
tokenStack []int
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}

// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }

// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}

if err := dec.tokenPrepareForDecode(); err != nil {
return err
}

if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value"}
}

// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n

// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)

// fixup token streaming state
dec.tokenValueEnd()

return err
}

// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}

// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()

scanp := dec.scanp
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, int(c))
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)

// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}

n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}

func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}

// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}

// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]

return err
}

func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(rune(c)) {
return true
}
}
return false
}

// An Encoder writes JSON objects to an output stream.
type Encoder struct {
w io.Writer
err error
}

// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w}
}

// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState()
err := e.marshal(v)
if err != nil {
return err
}

// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')

if _, err = enc.w.Write(e.Bytes()); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}

// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte

// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}

var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)

// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}

const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)

// advance tokenstate from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", 0}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", 0}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}

func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}

func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}

// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune

func (d Delim) String() string {
return string(d)
}

// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil

case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil

case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil

case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil

case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue

case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)

case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
clearOffset(err)
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough

default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x interface{}
if err := dec.Decode(&x); err != nil {
clearOffset(err)
return nil, err
}
return x, nil
}
}
}

func clearOffset(err error) {
if s, ok := err.(*SyntaxError); ok {
s.Offset = 0
}
}

func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(int(c)) + " " + context, 0}
}

// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}

func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(rune(c)) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}

/*
TODO

// EncodeToken writes the given JSON token to the stream.
// It returns an error if the delimiters [ ] { } are not properly used.
//
// EncodeToken does not call Flush, because usually it is part of
// a larger operation such as Encode, and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly,
// without using Encode, need to call Flush when finished to ensure that
// the JSON is written to the underlying writer.
func (e *Encoder) EncodeToken(t Token) error {
...
}

*/
354
vendor/github.com/ajeddeloh/go-json/stream_test.go
generated
vendored
Normal file
354
vendor/github.com/ajeddeloh/go-json/stream_test.go
generated
vendored
Normal file
@@ -0,0 +1,354 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
"bytes"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
)

// Test values for the stream test.
// One of each JSON kind.
var streamTest = []interface{}{
0.1,
"hello",
nil,
true,
false,
[]interface{}{"a", "b", "c"},
map[string]interface{}{"K": "Kelvin", "ß": "long s"},
3.14, // another value to make sure something can follow map
}

var streamEncoded = `0.1
"hello"
null
true
false
["a","b","c"]
{"ß":"long s","K":"Kelvin"}
3.14
`

func TestEncoder(t *testing.T) {
for i := 0; i <= len(streamTest); i++ {
var buf bytes.Buffer
enc := NewEncoder(&buf)
for j, v := range streamTest[0:i] {
if err := enc.Encode(v); err != nil {
t.Fatalf("encode #%d: %v", j, err)
}
}
if have, want := buf.String(), nlines(streamEncoded, i); have != want {
t.Errorf("encoding %d items: mismatch", i)
diff(t, []byte(have), []byte(want))
break
}
}
}

func TestDecoder(t *testing.T) {
for i := 0; i <= len(streamTest); i++ {
// Use stream without newlines as input,
// just to stress the decoder even more.
// Our test input does not include back-to-back numbers.
// Otherwise stripping the newlines would
// merge two adjacent JSON values.
var buf bytes.Buffer
for _, c := range nlines(streamEncoded, i) {
if c != '\n' {
buf.WriteRune(c)
}
}
out := make([]interface{}, i)
dec := NewDecoder(&buf)
for j := range out {
if err := dec.Decode(&out[j]); err != nil {
t.Fatalf("decode #%d/%d: %v", j, i, err)
}
}
if !reflect.DeepEqual(out, streamTest[0:i]) {
t.Errorf("decoding %d items: mismatch", i)
for j := range out {
if !reflect.DeepEqual(out[j], streamTest[j]) {
t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
}
}
break
}
}
}

func TestDecoderBuffered(t *testing.T) {
r := strings.NewReader(`{"Name": "Gopher"} extra `)
var m struct {
Name string
}
d := NewDecoder(r)
err := d.Decode(&m)
if err != nil {
t.Fatal(err)
}
if m.Name != "Gopher" {
t.Errorf("Name = %q; want Gopher", m.Name)
}
rest, err := ioutil.ReadAll(d.Buffered())
if err != nil {
t.Fatal(err)
}
if g, w := string(rest), " extra "; g != w {
t.Errorf("Remaining = %q; want %q", g, w)
}
}

func nlines(s string, n int) string {
if n <= 0 {
return ""
}
for i, c := range s {
if c == '\n' {
if n--; n == 0 {
return s[0 : i+1]
}
}
}
return s
}

func TestRawMessage(t *testing.T) {
// TODO(rsc): Should not need the * in *RawMessage
var data struct {
X float64
Id *RawMessage
Y float32
}
const raw = `["\u0056",null]`
const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
err := Unmarshal([]byte(msg), &data)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if string([]byte(*data.Id)) != raw {
t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
}
b, err := Marshal(&data)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if string(b) != msg {
t.Fatalf("Marshal: have %#q want %#q", b, msg)
}
}

func TestNullRawMessage(t *testing.T) {
// TODO(rsc): Should not need the * in *RawMessage
var data struct {
X float64
Id *RawMessage
Y float32
}
data.Id = new(RawMessage)
const msg = `{"X":0.1,"Id":null,"Y":0.2}`
err := Unmarshal([]byte(msg), &data)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if data.Id != nil {
t.Fatalf("Raw mismatch: have non-nil, want nil")
}
b, err := Marshal(&data)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if string(b) != msg {
t.Fatalf("Marshal: have %#q want %#q", b, msg)
}
}

var blockingTests = []string{
`{"x": 1}`,
`[1, 2, 3]`,
}

func TestBlocking(t *testing.T) {
for _, enc := range blockingTests {
r, w := net.Pipe()
go w.Write([]byte(enc))
var val interface{}

// If Decode reads beyond what w.Write writes above,
// it will block, and the test will deadlock.
if err := NewDecoder(r).Decode(&val); err != nil {
t.Errorf("decoding %s: %v", enc, err)
}
r.Close()
w.Close()
}
}

func BenchmarkEncoderEncode(b *testing.B) {
b.ReportAllocs()
type T struct {
X, Y string
}
v := &T{"foo", "bar"}
for i := 0; i < b.N; i++ {
if err := NewEncoder(ioutil.Discard).Encode(v); err != nil {
b.Fatal(err)
}
}
}

type tokenStreamCase struct {
json string
expTokens []interface{}
}

type decodeThis struct {
v interface{}
}

var tokenStreamCases []tokenStreamCase = []tokenStreamCase{
// streaming token cases
{json: `10`, expTokens: []interface{}{float64(10)}},
{json: ` [10] `, expTokens: []interface{}{
Delim('['), float64(10), Delim(']')}},
{json: ` [false,10,"b"] `, expTokens: []interface{}{
Delim('['), false, float64(10), "b", Delim(']')}},
{json: `{ "a": 1 }`, expTokens: []interface{}{
Delim('{'), "a", float64(1), Delim('}')}},
{json: `{"a": 1, "b":"3"}`, expTokens: []interface{}{
Delim('{'), "a", float64(1), "b", "3", Delim('}')}},
{json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
Delim('['),
Delim('{'), "a", float64(1), Delim('}'),
Delim('{'), "a", float64(2), Delim('}'),
Delim(']')}},
{json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'),
Delim('}')}},
{json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
Delim('{'), "obj", Delim('['),
Delim('{'), "a", float64(1), Delim('}'),
Delim(']'), Delim('}')}},

// streaming tokens with intermittent Decode()
{json: `{ "a": 1 }`, expTokens: []interface{}{
Delim('{'), "a",
decodeThis{float64(1)},
Delim('}')}},
{json: ` [ { "a" : 1 } ] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim(']')}},
{json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
decodeThis{map[string]interface{}{"a": float64(2)}},
Delim(']')}},
{json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []interface{}{
Delim('{'), "obj", Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim(']'), Delim('}')}},

{json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
Delim('{'), "obj",
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim('}')}},
{json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
Delim('{'), "obj",
decodeThis{[]interface{}{
map[string]interface{}{"a": float64(1)},
}},
Delim('}')}},
{json: ` [{"a": 1} {"a": 2}] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
decodeThis{&SyntaxError{"expected comma after array element", 0}},
}},
{json: `{ "a" 1 }`, expTokens: []interface{}{
Delim('{'), "a",
decodeThis{&SyntaxError{"expected colon after object key", 0}},
}},
}

func TestDecodeInStream(t *testing.T) {

for ci, tcase := range tokenStreamCases {

dec := NewDecoder(strings.NewReader(tcase.json))
for i, etk := range tcase.expTokens {

var tk interface{}
var err error

if dt, ok := etk.(decodeThis); ok {
etk = dt.v
err = dec.Decode(&tk)
} else {
tk, err = dec.Token()
}
if experr, ok := etk.(error); ok {
if err == nil || err.Error() != experr.Error() {
t.Errorf("case %v: Expected error %v in %q, but was %v", ci, experr, tcase.json, err)
}
break
} else if err == io.EOF {
t.Errorf("case %v: Unexpected EOF in %q", ci, tcase.json)
break
} else if err != nil {
t.Errorf("case %v: Unexpected error '%v' in %q", ci, err, tcase.json)
break
}
if !reflect.DeepEqual(tk, etk) {
t.Errorf(`case %v: %q @ %v expected %T(%v) was %T(%v)`, ci, tcase.json, i, etk, etk, tk, tk)
break
}
}
}

}

// Test from golang.org/issue/11893
func TestHTTPDecoding(t *testing.T) {
const raw = `{ "foo": "bar" }`

ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(raw))
}))
defer ts.Close()
res, err := http.Get(ts.URL)
if err != nil {
log.Fatalf("GET failed: %v", err)
}
defer res.Body.Close()

foo := struct {
Foo string
}{}

d := NewDecoder(res.Body)
err = d.Decode(&foo)
if err != nil {
t.Fatalf("Decode: %v", err)
}
if foo.Foo != "bar" {
t.Errorf("decoded %q; want \"bar\"", foo.Foo)
}

// make sure we get the EOF the second time
err = d.Decode(&foo)
if err != io.EOF {
t.Errorf("err = %v; want io.EOF", err)
}
}
115
vendor/github.com/ajeddeloh/go-json/tagkey_test.go
generated
vendored
Normal file
115
vendor/github.com/ajeddeloh/go-json/tagkey_test.go
generated
vendored
Normal file
@@ -0,0 +1,115 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
"testing"
)

type basicLatin2xTag struct {
V string `json:"$%-/"`
}

type basicLatin3xTag struct {
V string `json:"0123456789"`
}

type basicLatin4xTag struct {
V string `json:"ABCDEFGHIJKLMO"`
}

type basicLatin5xTag struct {
V string `json:"PQRSTUVWXYZ_"`
}

type basicLatin6xTag struct {
V string `json:"abcdefghijklmno"`
}

type basicLatin7xTag struct {
V string `json:"pqrstuvwxyz"`
}

type miscPlaneTag struct {
V string `json:"色は匂へど"`
}

type percentSlashTag struct {
V string `json:"text/html%"` // https://golang.org/issue/2718
}

type punctuationTag struct {
V string `json:"!#$%&()*+-./:<=>?@[]^_{|}~"` // https://golang.org/issue/3546
}

type emptyTag struct {
W string
}

type misnamedTag struct {
X string `jsom:"Misnamed"`
}

type badFormatTag struct {
Y string `:"BadFormat"`
}

type badCodeTag struct {
Z string `json:" !\"#&'()*+,."`
}

type spaceTag struct {
Q string `json:"With space"`
}

type unicodeTag struct {
W string `json:"Ελλάδα"`
}

var structTagObjectKeyTests = []struct {
raw interface{}
value string
key string
}{
{basicLatin2xTag{"2x"}, "2x", "$%-/"},
{basicLatin3xTag{"3x"}, "3x", "0123456789"},
{basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
{basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
{basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
{basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
{miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
{emptyTag{"Pour Moi"}, "Pour Moi", "W"},
{misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
{badFormatTag{"Orfevre"}, "Orfevre", "Y"},
{badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
{percentSlashTag{"brut"}, "brut", "text/html%"},
{punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:<=>?@[]^_{|}~"},
{spaceTag{"Perreddu"}, "Perreddu", "With space"},
{unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"},
}

func TestStructTagObjectKey(t *testing.T) {
for _, tt := range structTagObjectKeyTests {
b, err := Marshal(tt.raw)
if err != nil {
t.Fatalf("Marshal(%#q) failed: %v", tt.raw, err)
}
var f interface{}
err = Unmarshal(b, &f)
if err != nil {
t.Fatalf("Unmarshal(%#q) failed: %v", b, err)
}
for i, v := range f.(map[string]interface{}) {
switch i {
case tt.key:
if s, ok := v.(string); !ok || s != tt.value {
t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
}
default:
t.Fatalf("Unexpected key: %#q, from %#q", i, b)
}
}
}
}
44
vendor/github.com/ajeddeloh/go-json/tags.go
generated
vendored
Normal file
44
vendor/github.com/ajeddeloh/go-json/tags.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
"strings"
)

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
28
vendor/github.com/ajeddeloh/go-json/tags_test.go
generated
vendored
Normal file
28
vendor/github.com/ajeddeloh/go-json/tags_test.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
"testing"
)

func TestTagParsing(t *testing.T) {
name, opts := parseTag("field,foobar,foo")
if name != "field" {
t.Fatalf("name = %q, want field", name)
}
for _, tt := range []struct {
opt string
want bool
}{
{"foobar", true},
{"foo", true},
{"bar", false},
} {
if opts.Contains(tt.opt) != tt.want {
t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
}
}
}
BIN
vendor/github.com/ajeddeloh/go-json/testdata/code.json.gz
generated
vendored
Normal file
BIN
vendor/github.com/ajeddeloh/go-json/testdata/code.json.gz
generated
vendored
Normal file
Binary file not shown.
13
vendor/github.com/ajeddeloh/yaml/LICENSE
generated
vendored
Normal file
13
vendor/github.com/ajeddeloh/yaml/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,13 @@
Copyright 2011-2016 Canonical Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
0
vendor/github.com/go-yaml/yaml/apic.go → vendor/github.com/ajeddeloh/yaml/apic.go
generated
vendored
0
vendor/github.com/go-yaml/yaml/apic.go → vendor/github.com/ajeddeloh/yaml/apic.go
generated
vendored
224
vendor/github.com/go-yaml/yaml/decode.go → vendor/github.com/ajeddeloh/yaml/decode.go
generated
vendored
224
vendor/github.com/go-yaml/yaml/decode.go → vendor/github.com/ajeddeloh/yaml/decode.go
generated
vendored
@@ -11,30 +11,30 @@ import (
)

const (
documentNode = 1 << iota
mappingNode
sequenceNode
scalarNode
aliasNode
DocumentNode = 1 << iota
MappingNode
SequenceNode
ScalarNode
AliasNode
)

type node struct {
kind int
line, column int
tag string
value string
implicit bool
children []*node
anchors map[string]*node
type Node struct {
Kind int
Line, Column int
Tag string
Value string
Implicit bool
Children []*Node
Anchors map[string]*Node
}

// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
// Parser, produces a Node tree out of a libyaml event stream.

type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
doc *Node
}

func newParser(b []byte) *parser {
@@ -96,13 +96,13 @@ func (p *parser) fail() {
failf("%s%s", where, msg)
}

func (p *parser) anchor(n *node, anchor []byte) {
func (p *parser) anchor(n *Node, anchor []byte) {
if anchor != nil {
p.doc.anchors[string(anchor)] = n
p.doc.Anchors[string(anchor)] = n
}
}

func (p *parser) parse() *node {
func (p *parser) parse() *Node {
switch p.event.typ {
case yaml_SCALAR_EVENT:
return p.scalar()
@@ -123,20 +123,20 @@ func (p *parser) parse() *node {
panic("unreachable")
}

func (p *parser) node(kind int) *node {
return &node{
kind: kind,
line: p.event.start_mark.line,
column: p.event.start_mark.column,
func (p *parser) Node(kind int) *Node {
return &Node{
Kind: kind,
Line: p.event.start_mark.line,
Column: p.event.start_mark.column,
}
}

func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
func (p *parser) document() *Node {
n := p.Node(DocumentNode)
n.Anchors = make(map[string]*Node)
p.doc = n
p.skip()
n.children = append(n.children, p.parse())
n.Children = append(n.Children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
@@ -144,50 +144,50 @@ func (p *parser) document() *node {
return n
}

func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
func (p *parser) alias() *Node {
n := p.Node(AliasNode)
n.Value = string(p.event.anchor)
p.skip()
return n
}

func (p *parser) scalar() *node {
n := p.node(scalarNode)
n.value = string(p.event.value)
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
func (p *parser) scalar() *Node {
n := p.Node(ScalarNode)
n.Value = string(p.event.value)
n.Tag = string(p.event.tag)
n.Implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.skip()
return n
}

func (p *parser) sequence() *node {
n := p.node(sequenceNode)
func (p *parser) sequence() *Node {
n := p.Node(SequenceNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
n.Children = append(n.Children, p.parse())
}
p.skip()
return n
}

func (p *parser) mapping() *node {
n := p.node(mappingNode)
func (p *parser) mapping() *Node {
n := p.Node(MappingNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
n.Children = append(n.Children, p.parse(), p.parse())
}
p.skip()
return n
}

// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
// Decoder, unmarshals a Node into a provided value.

type decoder struct {
doc *node
doc *Node
aliases map[string]bool
mapType reflect.Type
terrors []string
@@ -206,11 +206,11 @@ func newDecoder() *decoder {
return d
}

func (d *decoder) terror(n *node, tag string, out reflect.Value) {
if n.tag != "" {
tag = n.tag
func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
if n.Tag != "" {
tag = n.Tag
}
value := n.value
value := n.Value
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
if len(value) > 10 {
value = " `" + value[:7] + "...`"
@@ -218,10 +218,10 @@ func (d *decoder) terror(n *node, tag string, out reflect.Value) {
value = " `" + value + "`"
}
}
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line+1, shortTag(tag), value, out.Type()))
}

func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
terrlen := len(d.terrors)
err := u.UnmarshalYAML(func(v interface{}) (err error) {
defer handleErr(&err)
@@ -250,8 +250,8 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.Tag == yaml_NULL_TAG || n.Kind == ScalarNode && n.Tag == "" && (n.Value == "null" || n.Value == "") {
return out, false, false
}
again := true
@@ -274,50 +274,50 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm
return out, false, false
}

func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
switch n.kind {
case documentNode:
func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
switch n.Kind {
case DocumentNode:
return d.document(n, out)
case aliasNode:
case AliasNode:
return d.alias(n, out)
}
out, unmarshaled, good := d.prepare(n, out)
if unmarshaled {
return good
}
switch n.kind {
case scalarNode:
switch n.Kind {
case ScalarNode:
good = d.scalar(n, out)
case mappingNode:
case MappingNode:
good = d.mapping(n, out)
case sequenceNode:
case SequenceNode:
good = d.sequence(n, out)
default:
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
panic("internal error: unknown Node kind: " + strconv.Itoa(n.Kind))
}
return good
}

func (d *decoder) document(n *node, out reflect.Value) (good bool) {
if len(n.children) == 1 {
func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
if len(n.Children) == 1 {
d.doc = n
d.unmarshal(n.children[0], out)
d.unmarshal(n.Children[0], out)
return true
}
return false
}

func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
an, ok := d.doc.Anchors[n.Value]
if !ok {
failf("unknown anchor '%s' referenced", n.value)
failf("unknown anchor '%s' referenced", n.Value)
}
if d.aliases[n.value] {
failf("anchor '%s' value contains itself", n.value)
if d.aliases[n.Value] {
failf("anchor '%s' value contains itself", n.Value)
}
d.aliases[n.value] = true
d.aliases[n.Value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
delete(d.aliases, n.Value)
return good
}

@@ -329,14 +329,14 @@ func resetMap(out reflect.Value) {
}
}

func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
func (d *decoder) scalar(n *Node, out reflect.Value) (good bool) {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
if n.Tag == "" && !n.Implicit {
tag = yaml_STR_TAG
resolved = n.value
resolved = n.Value
} else {
tag, resolved = resolve(n.tag, n.value)
tag, resolved = resolve(n.Tag, n.Value)
if tag == yaml_BINARY_TAG {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
@@ -368,7 +368,7 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
out.SetString(resolved.(string))
good = true
} else if resolved != nil {
out.SetString(n.value)
out.SetString(n.Value)
good = true
}
case reflect.Interface:
@@ -475,8 +475,8 @@ func settableValueOf(i interface{}) reflect.Value {
return sv
}

func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
l := len(n.children)
func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
l := len(n.Children)

var iface reflect.Value
switch out.Kind() {
@@ -495,7 +495,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
j := 0
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.children[i], e); ok {
if ok := d.unmarshal(n.Children[i], e); ok {
out.Index(j).Set(e)
j++
}
@@ -507,7 +507,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
return true
}

func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
|
||||
switch out.Kind() {
|
||||
case reflect.Struct:
|
||||
return d.mappingStruct(n, out)
|
||||
@@ -544,14 +544,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(outt))
|
||||
}
|
||||
l := len(n.children)
|
||||
l := len(n.Children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
if isMerge(n.Children[i]) {
|
||||
d.merge(n.Children[i+1], out)
|
||||
continue
|
||||
}
|
||||
k := reflect.New(kt).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
if d.unmarshal(n.Children[i], k) {
|
||||
kkind := k.Kind()
|
||||
if kkind == reflect.Interface {
|
||||
kkind = k.Elem().Kind()
|
||||
@@ -560,7 +560,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
failf("invalid map key: %#v", k.Interface())
|
||||
}
|
||||
e := reflect.New(et).Elem()
|
||||
if d.unmarshal(n.children[i+1], e) {
|
||||
if d.unmarshal(n.Children[i+1], e) {
|
||||
out.SetMapIndex(k, e)
|
||||
}
|
||||
}
|
||||
@@ -569,7 +569,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
|
||||
func (d *decoder) mappingSlice(n *Node, out reflect.Value) (good bool) {
|
||||
outt := out.Type()
|
||||
if outt.Elem() != mapItemType {
|
||||
d.terror(n, yaml_MAP_TAG, out)
|
||||
@@ -580,17 +580,17 @@ func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
|
||||
d.mapType = outt
|
||||
|
||||
var slice []MapItem
|
||||
var l = len(n.children)
|
||||
var l = len(n.Children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
if isMerge(n.Children[i]) {
|
||||
d.merge(n.Children[i+1], out)
|
||||
continue
|
||||
}
|
||||
item := MapItem{}
|
||||
k := reflect.ValueOf(&item.Key).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
if d.unmarshal(n.Children[i], k) {
|
||||
v := reflect.ValueOf(&item.Value).Elem()
|
||||
if d.unmarshal(n.children[i+1], v) {
|
||||
if d.unmarshal(n.Children[i+1], v) {
|
||||
slice = append(slice, item)
|
||||
}
|
||||
}
|
||||
@@ -600,13 +600,13 @@ func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||
func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
|
||||
sinfo, err := getStructInfo(out.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
name := settableValueOf("")
|
||||
l := len(n.children)
|
||||
l := len(n.Children)
|
||||
|
||||
var inlineMap reflect.Value
|
||||
var elemType reflect.Type
|
||||
@@ -617,9 +617,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||
}
|
||||
|
||||
for i := 0; i < l; i += 2 {
|
||||
ni := n.children[i]
|
||||
ni := n.Children[i]
|
||||
if isMerge(ni) {
|
||||
d.merge(n.children[i+1], out)
|
||||
d.merge(n.Children[i+1], out)
|
||||
continue
|
||||
}
|
||||
if !d.unmarshal(ni, name) {
|
||||
@@ -632,13 +632,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||
} else {
|
||||
field = out.FieldByIndex(info.Inline)
|
||||
}
|
||||
d.unmarshal(n.children[i+1], field)
|
||||
d.unmarshal(n.Children[i+1], field)
|
||||
} else if sinfo.InlineMap != -1 {
|
||||
if inlineMap.IsNil() {
|
||||
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
||||
}
|
||||
value := reflect.New(elemType).Elem()
|
||||
d.unmarshal(n.children[i+1], value)
|
||||
d.unmarshal(n.Children[i+1], value)
|
||||
inlineMap.SetMapIndex(name, value)
|
||||
}
|
||||
}
|
||||
@@ -649,26 +649,26 @@ func failWantMap() {
|
||||
failf("map merge requires map or sequence of maps as the value")
|
||||
}
|
||||
|
||||
func (d *decoder) merge(n *node, out reflect.Value) {
|
||||
switch n.kind {
|
||||
case mappingNode:
|
||||
func (d *decoder) merge(n *Node, out reflect.Value) {
|
||||
switch n.Kind {
|
||||
case MappingNode:
|
||||
d.unmarshal(n, out)
|
||||
case aliasNode:
|
||||
an, ok := d.doc.anchors[n.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
case AliasNode:
|
||||
an, ok := d.doc.Anchors[n.Value]
|
||||
if ok && an.Kind != MappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(n, out)
|
||||
case sequenceNode:
|
||||
// Step backwards as earlier nodes take precedence.
|
||||
for i := len(n.children) - 1; i >= 0; i-- {
|
||||
ni := n.children[i]
|
||||
if ni.kind == aliasNode {
|
||||
an, ok := d.doc.anchors[ni.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
case SequenceNode:
|
||||
// Step backwards as earlier Nodes take precedence.
|
||||
for i := len(n.Children) - 1; i >= 0; i-- {
|
||||
ni := n.Children[i]
|
||||
if ni.Kind == AliasNode {
|
||||
an, ok := d.doc.Anchors[ni.Value]
|
||||
if ok && an.Kind != MappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
} else if ni.kind != mappingNode {
|
||||
} else if ni.Kind != MappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(ni, out)
|
||||
@@ -678,6 +678,6 @@ func (d *decoder) merge(n *node, out reflect.Value) {
|
||||
}
|
||||
}
|
||||
|
||||
func isMerge(n *node) bool {
|
||||
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
|
||||
func isMerge(n *Node) bool {
|
||||
return n.Kind == ScalarNode && n.Value == "<<" && (n.Implicit == true || n.Tag == yaml_MERGE_TAG)
|
||||
}
|
||||
11
vendor/github.com/go-yaml/yaml/yaml.go → vendor/github.com/ajeddeloh/yaml/yaml.go
generated
vendored
@@ -95,6 +95,17 @@ func Unmarshal(in []byte, out interface{}) (err error) {
return nil
}

func UnmarshalToNode(in []byte) *Node {
p := newParser(in)
//defer p.destroy()
node := p.parse()
if node == nil {
return nil
}
tmp := Node(*node)
return &tmp
}

// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
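The vendored fork above adds UnmarshalToNode alongside the usual Unmarshal. A minimal sketch of how a caller might walk the returned AST, assuming only the exported Node fields visible in this diff (Kind, Value, Children, Line, Column) and the exported kind constants; Line appears to be zero-based, since the decoder prints n.Line+1 in its error messages:

package main

import (
	"fmt"

	yaml "github.com/ajeddeloh/yaml"
)

// dumpScalars prints every scalar in the parsed YAML AST with its position.
func dumpScalars(n *yaml.Node) {
	if n == nil {
		return
	}
	if n.Kind == yaml.ScalarNode {
		// +1 converts the zero-based position to the usual 1-based display.
		fmt.Printf("line %d, column %d: %q\n", n.Line+1, n.Column+1, n.Value)
	}
	for _, child := range n.Children {
		dumpScalars(child)
	}
}

func main() {
	root := yaml.UnmarshalToNode([]byte("networkd:\n  units:\n    - name: static.network\n"))
	dumpScalars(root)
}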
0
vendor/github.com/go-yaml/yaml/yamlh.go → vendor/github.com/ajeddeloh/yaml/yamlh.go
generated
vendored
90
vendor/github.com/coreos/fuze/config/astyaml.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
"errors"
"io"

yaml "github.com/ajeddeloh/yaml"
"github.com/coreos/ignition/config/validate"
)

var (
ErrNotDocumentNode = errors.New("Can only convert from document node")
)

type YamlNode struct {
key yaml.Node
yaml.Node
}

func FromYamlDocumentNode(n yaml.Node) (YamlNode, error) {
if n.Kind != yaml.DocumentNode {
return YamlNode{}, ErrNotDocumentNode
}

return YamlNode{
key: n,
Node: *n.Children[0],
}, nil
}

func (n YamlNode) ValueLineCol(source io.ReadSeeker) (int, int, string) {
return n.Line, n.Column, ""
}

func (n YamlNode) KeyLineCol(source io.ReadSeeker) (int, int, string) {
return n.key.Line, n.key.Column, ""
}

func (n YamlNode) LiteralValue() interface{} {
return n.Value
}

func (n YamlNode) SliceChild(index int) (validate.AstNode, bool) {
if n.Kind != yaml.SequenceNode {
return nil, false
}
if index >= len(n.Children) {
return nil, false
}

return YamlNode{
key: yaml.Node{},
Node: *n.Children[index],
}, true
}

func (n YamlNode) KeyValueMap() (map[string]validate.AstNode, bool) {
if n.Kind != yaml.MappingNode {
return nil, false
}

kvmap := map[string]validate.AstNode{}
for i := 0; i < len(n.Children); i += 2 {
key := *n.Children[i]
value := *n.Children[i+1]
kvmap[key.Value] = YamlNode{
key: key,
Node: value,
}
}
return kvmap, true
}

func (n YamlNode) Tag() string {
return "yaml"
}
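The new astyaml.go adapter is what lets Ignition's validate package point back at YAML source positions: YamlNode wraps a yaml.Node (plus the key node for mappings) and implements validate.AstNode. A hedged sketch of the adapter used on its own, relying only on the exported names added above (the YAML input is an arbitrary example):

package main

import (
	"fmt"

	yaml "github.com/ajeddeloh/yaml"
	fuze "github.com/coreos/fuze/config"
)

func main() {
	root := yaml.UnmarshalToNode([]byte("ignition:\n  config: {}\n"))
	doc, err := fuze.FromYamlDocumentNode(*root)
	if err != nil {
		fmt.Println(err)
		return
	}
	// KeyValueMap exposes each top-level key as an AstNode that can report
	// where its key and value sit in the original YAML.
	if kv, ok := doc.KeyValueMap(); ok {
		for name, child := range kv {
			line, col, _ := child.(fuze.YamlNode).KeyLineCol(nil)
			fmt.Printf("%s: key at line %d, column %d\n", name, line, col)
		}
	}
}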
38
vendor/github.com/coreos/fuze/config/config.go
generated
vendored
@@ -17,24 +17,38 @@ package config
import (
"reflect"

"github.com/coreos/ignition/config/types"
"github.com/go-yaml/yaml"
yaml "github.com/ajeddeloh/yaml"
"github.com/coreos/fuze/config/types"
"github.com/coreos/ignition/config/validate"
"github.com/coreos/ignition/config/validate/report"
)

func ParseAsV2_0_0(data []byte) (types.Config, error) {
var cfg Config
func Parse(data []byte) (types.Config, report.Report) {
var cfg types.Config
var r report.Report

if err := yaml.Unmarshal(data, &cfg); err != nil {
return types.Config{}, err
return types.Config{}, report.ReportFromError(err, report.EntryError)
}

var keyMap map[interface{}]interface{}
if err := yaml.Unmarshal(data, &keyMap); err != nil {
return types.Config{}, err
nodes := yaml.UnmarshalToNode(data)
if nodes == nil {
r.Add(report.Entry{
Kind: report.EntryWarning,
Message: "Configuration is empty",
})
r.Merge(validate.ValidateWithoutSource(reflect.ValueOf(cfg)))
} else {
root, err := FromYamlDocumentNode(*nodes)
if err != nil {
return types.Config{}, report.ReportFromError(err, report.EntryError)
}

r.Merge(validate.Validate(reflect.ValueOf(cfg), root, nil))
}

if err := assertKeysValid(keyMap, reflect.TypeOf(Config{})); err != nil {
return types.Config{}, err
if r.IsFatal() {
return types.Config{}, r
}

return ConvertAs2_0_0(cfg)
return cfg, r
}

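With this change the package's entry point moves from ParseAsV2_0_0(data) (Config, error) to Parse(data) (types.Config, report.Report), so callers inspect a validation report instead of a single error. A minimal sketch of the new calling convention, assuming only the names visible in this diff (Parse, and report.Report's Entries field and IsFatal method):

package main

import (
	"fmt"

	fuze "github.com/coreos/fuze/config"
)

func main() {
	data := []byte("systemd:\n  units:\n    - name: example.service\n      enable: true\n")

	cfg, rpt := fuze.Parse(data)
	// The report may carry warnings even when parsing succeeds.
	for _, entry := range rpt.Entries {
		fmt.Println(entry.Kind, entry.Message)
	}
	if rpt.IsFatal() {
		return
	}
	fmt.Printf("parsed %d systemd unit(s)\n", len(cfg.Systemd.Units))
}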
743
vendor/github.com/coreos/fuze/config/config_test.go
generated
vendored
@@ -19,16 +19,18 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
fuzeTypes "github.com/coreos/fuze/config/types"
|
||||
"github.com/coreos/ignition/config/types"
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
func TestParseAsV2_0_0(t *testing.T) {
|
||||
func TestParse(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
}
|
||||
type out struct {
|
||||
cfg types.Config
|
||||
err error
|
||||
cfg fuzeTypes.Config
|
||||
r report.Report
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
@@ -36,14 +38,16 @@ func TestParseAsV2_0_0(t *testing.T) {
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: ``},
|
||||
out: out{cfg: types.Config{Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}}}},
|
||||
},
|
||||
|
||||
// Errors
|
||||
{
|
||||
in: in{data: `foo:`},
|
||||
out: out{err: ErrKeysUnrecognized{"foo"}},
|
||||
in: in{data: ``},
|
||||
out: out{
|
||||
cfg: fuzeTypes.Config{},
|
||||
r: report.Report{
|
||||
Entries: []report.Entry{{
|
||||
Kind: report.EntryWarning,
|
||||
Message: "Configuration is empty",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: in{data: `
|
||||
@@ -52,7 +56,13 @@ networkd:
|
||||
- name: bad.blah
|
||||
contents: not valid
|
||||
`},
|
||||
out: out{err: errors.New("invalid networkd unit extension")},
|
||||
out: out{cfg: fuzeTypes.Config{
|
||||
Networkd: fuzeTypes.Networkd{
|
||||
Units: []fuzeTypes.NetworkdUnit{
|
||||
{Name: "bad.blah", Contents: "not valid"},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
|
||||
// Config
|
||||
@@ -74,40 +84,27 @@ ignition:
|
||||
function: sha512
|
||||
sum: 00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
|
||||
`},
|
||||
out: out{cfg: types.Config{
|
||||
Ignition: types.Ignition{
|
||||
Version: types.IgnitionVersion{Major: 2},
|
||||
Config: types.IgnitionConfig{
|
||||
Append: []types.ConfigReference{
|
||||
out: out{cfg: fuzeTypes.Config{
|
||||
Ignition: fuzeTypes.Ignition{
|
||||
Config: fuzeTypes.IgnitionConfig{
|
||||
Append: []fuzeTypes.ConfigReference{
|
||||
{
|
||||
Source: types.Url{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "/test1",
|
||||
},
|
||||
Verification: types.Verification{
|
||||
Hash: &types.Hash{
|
||||
Source: "http://example.com/test1",
|
||||
Verification: fuzeTypes.Verification{
|
||||
Hash: fuzeTypes.Hash{
|
||||
Function: "sha512",
|
||||
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Source: types.Url{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "/test2",
|
||||
},
|
||||
Source: "http://example.com/test2",
|
||||
},
|
||||
},
|
||||
Replace: &types.ConfigReference{
|
||||
Source: types.Url{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "/test3",
|
||||
},
|
||||
Verification: types.Verification{
|
||||
Hash: &types.Hash{
|
||||
Replace: &fuzeTypes.ConfigReference{
|
||||
Source: "http://example.com/test3",
|
||||
Verification: fuzeTypes.Verification{
|
||||
Hash: fuzeTypes.Hash{
|
||||
Function: "sha512",
|
||||
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
@@ -207,6 +204,525 @@ storage:
|
||||
- path: /opt/file4
|
||||
filesystem: filesystem2
|
||||
`},
|
||||
out: out{cfg: fuzeTypes.Config{
|
||||
Storage: fuzeTypes.Storage{
|
||||
Disks: []fuzeTypes.Disk{
|
||||
{
|
||||
Device: "/dev/sda",
|
||||
WipeTable: true,
|
||||
Partitions: []fuzeTypes.Partition{
|
||||
{
|
||||
Label: "ROOT",
|
||||
Number: 7,
|
||||
Size: "100MB",
|
||||
Start: "50MB",
|
||||
TypeGUID: "11111111-1111-1111-1111-111111111111",
|
||||
},
|
||||
{
|
||||
Label: "DATA",
|
||||
Number: 12,
|
||||
Size: "1GB",
|
||||
Start: "300MB",
|
||||
TypeGUID: "00000000-0000-0000-0000-000000000000",
|
||||
},
|
||||
{
|
||||
Label: "NOTHING",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Device: "/dev/sdb",
|
||||
WipeTable: true,
|
||||
},
|
||||
},
|
||||
Arrays: []fuzeTypes.Raid{
|
||||
{
|
||||
Name: "fast",
|
||||
Level: "raid0",
|
||||
Devices: []string{"/dev/sdc", "/dev/sdd"},
|
||||
},
|
||||
{
|
||||
Name: "durable",
|
||||
Level: "raid1",
|
||||
Devices: []string{"/dev/sde", "/dev/sdf", "/dev/sdg"},
|
||||
Spares: 1,
|
||||
},
|
||||
},
|
||||
Filesystems: []fuzeTypes.Filesystem{
|
||||
{
|
||||
Name: "filesystem1",
|
||||
Mount: &fuzeTypes.Mount{
|
||||
Device: "/dev/disk/by-partlabel/ROOT",
|
||||
Format: "btrfs",
|
||||
Create: &fuzeTypes.Create{
|
||||
Force: true,
|
||||
Options: []string{"-L", "ROOT"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "filesystem2",
|
||||
Mount: &fuzeTypes.Mount{
|
||||
Device: "/dev/disk/by-partlabel/DATA",
|
||||
Format: "ext4",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "filesystem3",
|
||||
Path: "/sysroot",
|
||||
},
|
||||
},
|
||||
Files: []fuzeTypes.File{
|
||||
{
|
||||
Filesystem: "filesystem1",
|
||||
Path: "/opt/file1",
|
||||
Contents: fuzeTypes.FileContents{
|
||||
Inline: "file1",
|
||||
},
|
||||
Mode: 0644,
|
||||
User: fuzeTypes.FileUser{Id: 500},
|
||||
Group: fuzeTypes.FileGroup{Id: 501},
|
||||
},
|
||||
{
|
||||
Filesystem: "filesystem1",
|
||||
Path: "/opt/file2",
|
||||
Contents: fuzeTypes.FileContents{
|
||||
Remote: fuzeTypes.Remote{
|
||||
Url: "http://example.com/file2",
|
||||
Compression: "gzip",
|
||||
Verification: fuzeTypes.Verification{
|
||||
Hash: fuzeTypes.Hash{
|
||||
Function: "sha512",
|
||||
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Mode: 0644,
|
||||
User: fuzeTypes.FileUser{Id: 502},
|
||||
Group: fuzeTypes.FileGroup{Id: 503},
|
||||
},
|
||||
{
|
||||
Filesystem: "filesystem2",
|
||||
Path: "/opt/file3",
|
||||
Contents: fuzeTypes.FileContents{
|
||||
Remote: fuzeTypes.Remote{
|
||||
Url: "http://example.com/file3",
|
||||
Compression: "gzip",
|
||||
},
|
||||
},
|
||||
Mode: 0400,
|
||||
User: fuzeTypes.FileUser{Id: 1000},
|
||||
Group: fuzeTypes.FileGroup{Id: 1001},
|
||||
},
|
||||
{
|
||||
Filesystem: "filesystem2",
|
||||
Path: "/opt/file4",
|
||||
Contents: fuzeTypes.FileContents{
|
||||
Inline: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
|
||||
// systemd
|
||||
{
|
||||
in: in{data: `
|
||||
systemd:
|
||||
units:
|
||||
- name: test1.service
|
||||
enable: true
|
||||
contents: test1 contents
|
||||
dropins:
|
||||
- name: conf1.conf
|
||||
contents: conf1 contents
|
||||
- name: conf2.conf
|
||||
contents: conf2 contents
|
||||
- name: test2.service
|
||||
mask: true
|
||||
contents: test2 contents
|
||||
`},
|
||||
out: out{cfg: fuzeTypes.Config{
|
||||
Systemd: fuzeTypes.Systemd{
|
||||
Units: []fuzeTypes.SystemdUnit{
|
||||
{
|
||||
Name: "test1.service",
|
||||
Enable: true,
|
||||
Contents: "test1 contents",
|
||||
DropIns: []fuzeTypes.SystemdUnitDropIn{
|
||||
{
|
||||
Name: "conf1.conf",
|
||||
Contents: "conf1 contents",
|
||||
},
|
||||
{
|
||||
Name: "conf2.conf",
|
||||
Contents: "conf2 contents",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "test2.service",
|
||||
Mask: true,
|
||||
Contents: "test2 contents",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
|
||||
// networkd
|
||||
{
|
||||
in: in{data: `
|
||||
networkd:
|
||||
units:
|
||||
- name: empty.netdev
|
||||
- name: test.network
|
||||
contents: test config
|
||||
`},
|
||||
out: out{cfg: fuzeTypes.Config{
|
||||
Networkd: fuzeTypes.Networkd{
|
||||
Units: []fuzeTypes.NetworkdUnit{
|
||||
{
|
||||
Name: "empty.netdev",
|
||||
},
|
||||
{
|
||||
Name: "test.network",
|
||||
Contents: "test config",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
|
||||
// passwd
|
||||
{
|
||||
in: in{data: `
|
||||
passwd:
|
||||
users:
|
||||
- name: user 1
|
||||
password_hash: password 1
|
||||
ssh_authorized_keys:
|
||||
- key1
|
||||
- key2
|
||||
- name: user 2
|
||||
password_hash: password 2
|
||||
ssh_authorized_keys:
|
||||
- key3
|
||||
- key4
|
||||
create:
|
||||
uid: 123
|
||||
gecos: gecos
|
||||
home_dir: /home/user 2
|
||||
no_create_home: true
|
||||
primary_group: wheel
|
||||
groups:
|
||||
- wheel
|
||||
- plugdev
|
||||
no_user_group: true
|
||||
system: true
|
||||
no_log_init: true
|
||||
shell: /bin/zsh
|
||||
- name: user 3
|
||||
password_hash: password 3
|
||||
ssh_authorized_keys:
|
||||
- key5
|
||||
- key6
|
||||
create: {}
|
||||
groups:
|
||||
- name: group 1
|
||||
gid: 1000
|
||||
password_hash: password 1
|
||||
system: true
|
||||
- name: group 2
|
||||
password_hash: password 2
|
||||
`},
|
||||
out: out{cfg: fuzeTypes.Config{
|
||||
Passwd: fuzeTypes.Passwd{
|
||||
Users: []fuzeTypes.User{
|
||||
{
|
||||
Name: "user 1",
|
||||
PasswordHash: "password 1",
|
||||
SSHAuthorizedKeys: []string{"key1", "key2"},
|
||||
},
|
||||
{
|
||||
Name: "user 2",
|
||||
PasswordHash: "password 2",
|
||||
SSHAuthorizedKeys: []string{"key3", "key4"},
|
||||
Create: &fuzeTypes.UserCreate{
|
||||
Uid: func(i uint) *uint { return &i }(123),
|
||||
GECOS: "gecos",
|
||||
Homedir: "/home/user 2",
|
||||
NoCreateHome: true,
|
||||
PrimaryGroup: "wheel",
|
||||
Groups: []string{"wheel", "plugdev"},
|
||||
NoUserGroup: true,
|
||||
System: true,
|
||||
NoLogInit: true,
|
||||
Shell: "/bin/zsh",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "user 3",
|
||||
PasswordHash: "password 3",
|
||||
SSHAuthorizedKeys: []string{"key5", "key6"},
|
||||
Create: &fuzeTypes.UserCreate{},
|
||||
},
|
||||
},
|
||||
Groups: []fuzeTypes.Group{
|
||||
{
|
||||
Name: "group 1",
|
||||
Gid: func(i uint) *uint { return &i }(1000),
|
||||
PasswordHash: "password 1",
|
||||
System: true,
|
||||
},
|
||||
{
|
||||
Name: "group 2",
|
||||
PasswordHash: "password 2",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
cfg, err := Parse([]byte(test.in.data))
|
||||
if !reflect.DeepEqual(err, test.out.r) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.r, err)
|
||||
}
|
||||
if !reflect.DeepEqual(cfg, test.out.cfg) {
|
||||
t.Errorf("#%d: bad config: want %#v, got %#v", i, test.out.cfg, cfg)
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestConvertAs2_0_0(t *testing.T) {
|
||||
type in struct {
|
||||
cfg fuzeTypes.Config
|
||||
}
|
||||
type out struct {
|
||||
cfg types.Config
|
||||
r report.Report
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
in in
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{cfg: fuzeTypes.Config{}},
|
||||
out: out{cfg: types.Config{Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}}}},
|
||||
},
|
||||
{
|
||||
in: in{cfg: fuzeTypes.Config{
|
||||
Networkd: fuzeTypes.Networkd{
|
||||
Units: []fuzeTypes.NetworkdUnit{
|
||||
{Name: "bad.blah", Contents: "not valid"},
|
||||
},
|
||||
},
|
||||
}},
|
||||
out: out{r: report.ReportFromError(errors.New("invalid networkd unit extension"), report.EntryError)},
|
||||
},
|
||||
|
||||
// Config
|
||||
{
|
||||
in: in{cfg: fuzeTypes.Config{
|
||||
Ignition: fuzeTypes.Ignition{
|
||||
Config: fuzeTypes.IgnitionConfig{
|
||||
Append: []fuzeTypes.ConfigReference{
|
||||
{
|
||||
Source: "http://example.com/test1",
|
||||
Verification: fuzeTypes.Verification{
|
||||
Hash: fuzeTypes.Hash{
|
||||
Function: "sha512",
|
||||
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Source: "http://example.com/test2",
|
||||
},
|
||||
},
|
||||
Replace: &fuzeTypes.ConfigReference{
|
||||
Source: "http://example.com/test3",
|
||||
Verification: fuzeTypes.Verification{
|
||||
Hash: fuzeTypes.Hash{
|
||||
Function: "sha512",
|
||||
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
out: out{cfg: types.Config{
|
||||
Ignition: types.Ignition{
|
||||
Version: types.IgnitionVersion{Major: 2},
|
||||
Config: types.IgnitionConfig{
|
||||
Append: []types.ConfigReference{
|
||||
{
|
||||
Source: types.Url{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "/test1",
|
||||
},
|
||||
Verification: types.Verification{
|
||||
Hash: &types.Hash{
|
||||
Function: "sha512",
|
||||
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Source: types.Url{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "/test2",
|
||||
},
|
||||
},
|
||||
},
|
||||
Replace: &types.ConfigReference{
|
||||
Source: types.Url{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "/test3",
|
||||
},
|
||||
Verification: types.Verification{
|
||||
Hash: &types.Hash{
|
||||
Function: "sha512",
|
||||
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
|
||||
// Storage
|
||||
{
|
||||
in: in{cfg: fuzeTypes.Config{
|
||||
Storage: fuzeTypes.Storage{
|
||||
Disks: []fuzeTypes.Disk{
|
||||
{
|
||||
Device: "/dev/sda",
|
||||
WipeTable: true,
|
||||
Partitions: []fuzeTypes.Partition{
|
||||
{
|
||||
Label: "ROOT",
|
||||
Number: 7,
|
||||
Size: "100MB",
|
||||
Start: "50MB",
|
||||
TypeGUID: "11111111-1111-1111-1111-111111111111",
|
||||
},
|
||||
{
|
||||
Label: "DATA",
|
||||
Number: 12,
|
||||
Size: "1GB",
|
||||
Start: "300MB",
|
||||
TypeGUID: "00000000-0000-0000-0000-000000000000",
|
||||
},
|
||||
{
|
||||
Label: "NOTHING",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Device: "/dev/sdb",
|
||||
WipeTable: true,
|
||||
},
|
||||
},
|
||||
Arrays: []fuzeTypes.Raid{
|
||||
{
|
||||
Name: "fast",
|
||||
Level: "raid0",
|
||||
Devices: []string{"/dev/sdc", "/dev/sdd"},
|
||||
},
|
||||
{
|
||||
Name: "durable",
|
||||
Level: "raid1",
|
||||
Devices: []string{"/dev/sde", "/dev/sdf", "/dev/sdg"},
|
||||
Spares: 1,
|
||||
},
|
||||
},
|
||||
Filesystems: []fuzeTypes.Filesystem{
|
||||
{
|
||||
Name: "filesystem1",
|
||||
Mount: &fuzeTypes.Mount{
|
||||
Device: "/dev/disk/by-partlabel/ROOT",
|
||||
Format: "btrfs",
|
||||
Create: &fuzeTypes.Create{
|
||||
Force: true,
|
||||
Options: []string{"-L", "ROOT"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "filesystem2",
|
||||
Mount: &fuzeTypes.Mount{
|
||||
Device: "/dev/disk/by-partlabel/DATA",
|
||||
Format: "ext4",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "filesystem3",
|
||||
Path: "/sysroot",
|
||||
},
|
||||
},
|
||||
Files: []fuzeTypes.File{
|
||||
{
|
||||
Filesystem: "filesystem1",
|
||||
Path: "/opt/file1",
|
||||
Contents: fuzeTypes.FileContents{
|
||||
Inline: "file1",
|
||||
},
|
||||
Mode: 0644,
|
||||
User: fuzeTypes.FileUser{Id: 500},
|
||||
Group: fuzeTypes.FileGroup{Id: 501},
|
||||
},
|
||||
{
|
||||
Filesystem: "filesystem1",
|
||||
Path: "/opt/file2",
|
||||
Contents: fuzeTypes.FileContents{
|
||||
Remote: fuzeTypes.Remote{
|
||||
Url: "http://example.com/file2",
|
||||
Compression: "gzip",
|
||||
Verification: fuzeTypes.Verification{
|
||||
Hash: fuzeTypes.Hash{
|
||||
Function: "sha512",
|
||||
Sum: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Mode: 0644,
|
||||
User: fuzeTypes.FileUser{Id: 502},
|
||||
Group: fuzeTypes.FileGroup{Id: 503},
|
||||
},
|
||||
{
|
||||
Filesystem: "filesystem2",
|
||||
Path: "/opt/file3",
|
||||
Contents: fuzeTypes.FileContents{
|
||||
Remote: fuzeTypes.Remote{
|
||||
Url: "http://example.com/file3",
|
||||
Compression: "gzip",
|
||||
},
|
||||
},
|
||||
Mode: 0400,
|
||||
User: fuzeTypes.FileUser{Id: 1000},
|
||||
Group: fuzeTypes.FileGroup{Id: 1001},
|
||||
},
|
||||
{
|
||||
Filesystem: "filesystem2",
|
||||
Path: "/opt/file4",
|
||||
Contents: fuzeTypes.FileContents{
|
||||
Inline: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
out: out{cfg: types.Config{
|
||||
Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}},
|
||||
Storage: types.Storage{
|
||||
@@ -343,21 +859,32 @@ storage:
|
||||
|
||||
// systemd
|
||||
{
|
||||
in: in{data: `
|
||||
systemd:
|
||||
units:
|
||||
- name: test1.service
|
||||
enable: true
|
||||
contents: test1 contents
|
||||
dropins:
|
||||
- name: conf1.conf
|
||||
contents: conf1 contents
|
||||
- name: conf2.conf
|
||||
contents: conf2 contents
|
||||
- name: test2.service
|
||||
mask: true
|
||||
contents: test2 contents
|
||||
`},
|
||||
in: in{cfg: fuzeTypes.Config{
|
||||
Systemd: fuzeTypes.Systemd{
|
||||
Units: []fuzeTypes.SystemdUnit{
|
||||
{
|
||||
Name: "test1.service",
|
||||
Enable: true,
|
||||
Contents: "test1 contents",
|
||||
DropIns: []fuzeTypes.SystemdUnitDropIn{
|
||||
{
|
||||
Name: "conf1.conf",
|
||||
Contents: "conf1 contents",
|
||||
},
|
||||
{
|
||||
Name: "conf2.conf",
|
||||
Contents: "conf2 contents",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "test2.service",
|
||||
Mask: true,
|
||||
Contents: "test2 contents",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
out: out{cfg: types.Config{
|
||||
Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}},
|
||||
Systemd: types.Systemd{
|
||||
@@ -389,13 +916,19 @@ systemd:
|
||||
|
||||
// networkd
|
||||
{
|
||||
in: in{data: `
|
||||
networkd:
|
||||
units:
|
||||
- name: empty.netdev
|
||||
- name: test.network
|
||||
contents: test config
|
||||
`},
|
||||
in: in{cfg: fuzeTypes.Config{
|
||||
Networkd: fuzeTypes.Networkd{
|
||||
Units: []fuzeTypes.NetworkdUnit{
|
||||
{
|
||||
Name: "empty.netdev",
|
||||
},
|
||||
{
|
||||
Name: "test.network",
|
||||
Contents: "test config",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
out: out{cfg: types.Config{
|
||||
Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}},
|
||||
Networkd: types.Networkd{
|
||||
@@ -414,46 +947,52 @@ networkd:
|
||||
|
||||
// passwd
|
||||
{
|
||||
in: in{data: `
|
||||
passwd:
|
||||
users:
|
||||
- name: user 1
|
||||
password_hash: password 1
|
||||
ssh_authorized_keys:
|
||||
- key1
|
||||
- key2
|
||||
- name: user 2
|
||||
password_hash: password 2
|
||||
ssh_authorized_keys:
|
||||
- key3
|
||||
- key4
|
||||
create:
|
||||
uid: 123
|
||||
gecos: gecos
|
||||
home_dir: /home/user 2
|
||||
no_create_home: true
|
||||
primary_group: wheel
|
||||
groups:
|
||||
- wheel
|
||||
- plugdev
|
||||
no_user_group: true
|
||||
system: true
|
||||
no_log_init: true
|
||||
shell: /bin/zsh
|
||||
- name: user 3
|
||||
password_hash: password 3
|
||||
ssh_authorized_keys:
|
||||
- key5
|
||||
- key6
|
||||
create: {}
|
||||
groups:
|
||||
- name: group 1
|
||||
gid: 1000
|
||||
password_hash: password 1
|
||||
system: true
|
||||
- name: group 2
|
||||
password_hash: password 2
|
||||
`},
|
||||
in: in{cfg: fuzeTypes.Config{
|
||||
Passwd: fuzeTypes.Passwd{
|
||||
Users: []fuzeTypes.User{
|
||||
{
|
||||
Name: "user 1",
|
||||
PasswordHash: "password 1",
|
||||
SSHAuthorizedKeys: []string{"key1", "key2"},
|
||||
},
|
||||
{
|
||||
Name: "user 2",
|
||||
PasswordHash: "password 2",
|
||||
SSHAuthorizedKeys: []string{"key3", "key4"},
|
||||
Create: &fuzeTypes.UserCreate{
|
||||
Uid: func(i uint) *uint { return &i }(123),
|
||||
GECOS: "gecos",
|
||||
Homedir: "/home/user 2",
|
||||
NoCreateHome: true,
|
||||
PrimaryGroup: "wheel",
|
||||
Groups: []string{"wheel", "plugdev"},
|
||||
NoUserGroup: true,
|
||||
System: true,
|
||||
NoLogInit: true,
|
||||
Shell: "/bin/zsh",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "user 3",
|
||||
PasswordHash: "password 3",
|
||||
SSHAuthorizedKeys: []string{"key5", "key6"},
|
||||
Create: &fuzeTypes.UserCreate{},
|
||||
},
|
||||
},
|
||||
Groups: []fuzeTypes.Group{
|
||||
{
|
||||
Name: "group 1",
|
||||
Gid: func(i uint) *uint { return &i }(1000),
|
||||
PasswordHash: "password 1",
|
||||
System: true,
|
||||
},
|
||||
{
|
||||
Name: "group 2",
|
||||
PasswordHash: "password 2",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
out: out{cfg: types.Config{
|
||||
Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2}},
|
||||
Passwd: types.Passwd{
|
||||
@@ -505,9 +1044,9 @@ passwd:
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
cfg, err := ParseAsV2_0_0([]byte(test.in.data))
|
||||
if !reflect.DeepEqual(err, test.out.err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
cfg, r := ConvertAs2_0_0(test.in.cfg)
|
||||
if !reflect.DeepEqual(r, test.out.r) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.r, r)
|
||||
}
|
||||
if !reflect.DeepEqual(cfg, test.out.cfg) {
|
||||
t.Errorf("#%d: bad config: want %#v, got %#v", i, test.out.cfg, cfg)
|
||||
|
||||
27
vendor/github.com/coreos/fuze/config/convert.go
generated
vendored
@@ -17,9 +17,13 @@ package config
import (
"fmt"
"net/url"
"reflect"

"github.com/alecthomas/units"
fuzeTypes "github.com/coreos/fuze/config/types"
"github.com/coreos/ignition/config/types"
"github.com/coreos/ignition/config/validate"
"github.com/coreos/ignition/config/validate/report"
"github.com/vincent-petithory/dataurl"
)

@@ -27,7 +31,7 @@ const (
BYTES_PER_SECTOR = 512
)

func ConvertAs2_0_0(in Config) (types.Config, error) {
func ConvertAs2_0_0(in fuzeTypes.Config) (types.Config, report.Report) {
out := types.Config{
Ignition: types.Ignition{
Version: types.IgnitionVersion{Major: 2, Minor: 0},
@@ -37,7 +41,7 @@ func ConvertAs2_0_0(in Config) (types.Config, error) {
for _, ref := range in.Ignition.Config.Append {
newRef, err := convertConfigReference(ref)
if err != nil {
return types.Config{}, err
return types.Config{}, report.ReportFromError(err, report.EntryError)
}
out.Ignition.Config.Append = append(out.Ignition.Config.Append, newRef)
}
@@ -45,7 +49,7 @@ func ConvertAs2_0_0(in Config) (types.Config, error) {
if in.Ignition.Config.Replace != nil {
newRef, err := convertConfigReference(*in.Ignition.Config.Replace)
if err != nil {
return types.Config{}, err
return types.Config{}, report.ReportFromError(err, report.EntryError)
}
out.Ignition.Config.Replace = &newRef
}
@@ -59,11 +63,11 @@ func ConvertAs2_0_0(in Config) (types.Config, error) {
for _, partition := range disk.Partitions {
size, err := convertPartitionDimension(partition.Size)
if err != nil {
return types.Config{}, err
return types.Config{}, report.ReportFromError(err, report.EntryError)
}
start, err := convertPartitionDimension(partition.Start)
if err != nil {
return types.Config{}, err
return types.Config{}, report.ReportFromError(err, report.EntryError)
}

newDisk.Partitions = append(newDisk.Partitions, types.Partition{
@@ -142,7 +146,7 @@ func ConvertAs2_0_0(in Config) (types.Config, error) {
if file.Contents.Remote.Url != "" {
source, err := url.Parse(file.Contents.Remote.Url)
if err != nil {
return types.Config{}, err
return types.Config{}, report.ReportFromError(err, report.EntryError)
}

newFile.Contents = types.FileContents{Source: types.Url(*source)}
@@ -222,14 +226,14 @@ func ConvertAs2_0_0(in Config) (types.Config, error) {
})
}

if err := out.AssertValid(); err != nil {
return types.Config{}, err
r := validate.ValidateWithoutSource(reflect.ValueOf(out))
if r.IsFatal() {
return types.Config{}, r
}

return out, nil
return out, r
}

func convertConfigReference(in ConfigReference) (types.ConfigReference, error) {
func convertConfigReference(in fuzeTypes.ConfigReference) (types.ConfigReference, error) {
source, err := url.Parse(in.Source)
if err != nil {
return types.ConfigReference{}, err
@@ -241,7 +246,7 @@ func convertConfigReference(in ConfigReference) (types.ConfigReference, error) {
}, nil
}

func convertVerification(in Verification) types.Verification {
func convertVerification(in fuzeTypes.Verification) types.Verification {
if in.Hash.Function == "" || in.Hash.Sum == "" {
return types.Verification{}
}

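ConvertAs2_0_0 now takes the new fuzeTypes.Config and also returns a report.Report, so the usual flow becomes Parse followed by ConvertAs2_0_0, checking IsFatal() after each step; the conversion re-validates its output via validate.ValidateWithoutSource as shown above. A hedged sketch of that two-step pipeline (the JSON encoding at the end is just one way a caller such as matchbox might emit the resulting Ignition config):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	fuze "github.com/coreos/fuze/config"
)

func main() {
	yamlCfg := []byte("passwd:\n  users:\n    - name: core\n")

	// Step 1: parse and validate the Fuze YAML.
	fuzeCfg, rpt := fuze.Parse(yamlCfg)
	if rpt.IsFatal() {
		fmt.Fprintln(os.Stderr, rpt)
		os.Exit(1)
	}

	// Step 2: convert to Ignition v2.0.0 types, which validates the result again.
	ignCfg, rpt := fuze.ConvertAs2_0_0(fuzeCfg)
	if rpt.IsFatal() {
		fmt.Fprintln(os.Stderr, rpt)
		os.Exit(1)
	}

	if err := json.NewEncoder(os.Stdout).Encode(ignCfg); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}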
129
vendor/github.com/coreos/fuze/config/types.go
generated
vendored
@@ -1,129 +0,0 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
type Config struct {
|
||||
Ignition struct {
|
||||
Config struct {
|
||||
Append []ConfigReference `yaml:"append"`
|
||||
Replace *ConfigReference `yaml:"replace"`
|
||||
} `yaml:"config"`
|
||||
} `yaml:"ignition"`
|
||||
Storage struct {
|
||||
Disks []struct {
|
||||
Device string `yaml:"device"`
|
||||
WipeTable bool `yaml:"wipe_table"`
|
||||
Partitions []struct {
|
||||
Label string `yaml:"label"`
|
||||
Number int `yaml:"number"`
|
||||
Size string `yaml:"size"`
|
||||
Start string `yaml:"start"`
|
||||
TypeGUID string `yaml:"type_guid"`
|
||||
} `yaml:"partitions"`
|
||||
} `yaml:"disks"`
|
||||
Arrays []struct {
|
||||
Name string `yaml:"name"`
|
||||
Level string `yaml:"level"`
|
||||
Devices []string `yaml:"devices"`
|
||||
Spares int `yaml:"spares"`
|
||||
} `yaml:"raid"`
|
||||
Filesystems []struct {
|
||||
Name string `yaml:"name"`
|
||||
Mount *struct {
|
||||
Device string `yaml:"device"`
|
||||
Format string `yaml:"format"`
|
||||
Create *struct {
|
||||
Force bool `yaml:"force"`
|
||||
Options []string `yaml:"options"`
|
||||
} `yaml:"create"`
|
||||
} `yaml:"mount"`
|
||||
Path string `yaml:"path"`
|
||||
} `yaml:"filesystems"`
|
||||
Files []struct {
|
||||
Filesystem string `yaml:"filesystem"`
|
||||
Path string `yaml:"path"`
|
||||
Contents struct {
|
||||
Remote struct {
|
||||
Url string `yaml:"url"`
|
||||
Compression string `yaml:"compression"`
|
||||
Verification Verification `yaml:"verification"`
|
||||
} `yaml:"remote"`
|
||||
Inline string `yaml:"inline"`
|
||||
} `yaml:"contents"`
|
||||
Mode int `yaml:"mode"`
|
||||
User struct {
|
||||
Id int `yaml:"id"`
|
||||
} `yaml:"user"`
|
||||
Group struct {
|
||||
Id int `yaml:"id"`
|
||||
} `yaml:"group"`
|
||||
} `yaml:"files"`
|
||||
} `yaml:"storage"`
|
||||
Systemd struct {
|
||||
Units []struct {
|
||||
Name string `yaml:"name"`
|
||||
Enable bool `yaml:"enable"`
|
||||
Mask bool `yaml:"mask"`
|
||||
Contents string `yaml:"contents"`
|
||||
DropIns []struct {
|
||||
Name string `yaml:"name"`
|
||||
Contents string `yaml:"contents"`
|
||||
} `yaml:"dropins"`
|
||||
} `yaml:"units"`
|
||||
} `yaml:"systemd"`
|
||||
Networkd struct {
|
||||
Units []struct {
|
||||
Name string `yaml:"name"`
|
||||
Contents string `yaml:"contents"`
|
||||
} `yaml:"units"`
|
||||
} `yaml:"networkd"`
|
||||
Passwd struct {
|
||||
Users []struct {
|
||||
Name string `yaml:"name"`
|
||||
PasswordHash string `yaml:"password_hash"`
|
||||
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
|
||||
Create *struct {
|
||||
Uid *uint `yaml:"uid"`
|
||||
GECOS string `yaml:"gecos"`
|
||||
Homedir string `yaml:"home_dir"`
|
||||
NoCreateHome bool `yaml:"no_create_home"`
|
||||
PrimaryGroup string `yaml:"primary_group"`
|
||||
Groups []string `yaml:"groups"`
|
||||
NoUserGroup bool `yaml:"no_user_group"`
|
||||
System bool `yaml:"system"`
|
||||
NoLogInit bool `yaml:"no_log_init"`
|
||||
Shell string `yaml:"shell"`
|
||||
} `yaml:"create"`
|
||||
} `yaml:"users"`
|
||||
Groups []struct {
|
||||
Name string `yaml:"name"`
|
||||
Gid *uint `yaml:"gid"`
|
||||
PasswordHash string `yaml:"password_hash"`
|
||||
System bool `yaml:"system"`
|
||||
} `yaml:"groups"`
|
||||
} `yaml:"passwd"`
|
||||
}
|
||||
|
||||
type ConfigReference struct {
|
||||
Source string `yaml:"source"`
|
||||
Verification Verification `yaml:"verification"`
|
||||
}
|
||||
|
||||
type Verification struct {
|
||||
Hash struct {
|
||||
Function string `yaml:"function"`
|
||||
Sum string `yaml:"sum"`
|
||||
} `yaml:"hash"`
|
||||
}
|
||||
37
vendor/github.com/coreos/fuze/config/types/config.go
generated
vendored
Normal file
37
vendor/github.com/coreos/fuze/config/types/config.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type Config struct {
|
||||
Ignition Ignition `yaml:"ignition"`
|
||||
Storage Storage `yaml:"storage"`
|
||||
Systemd Systemd `yaml:"systemd"`
|
||||
Networkd Networkd `yaml:"networkd"`
|
||||
Passwd Passwd `yaml:"passwd"`
|
||||
}
|
||||
|
||||
type Ignition struct {
|
||||
Config IgnitionConfig `yaml:"config"`
|
||||
}
|
||||
|
||||
type IgnitionConfig struct {
|
||||
Append []ConfigReference `yaml:"append"`
|
||||
Replace *ConfigReference `yaml:"replace"`
|
||||
}
|
||||
|
||||
type ConfigReference struct {
|
||||
Source string `yaml:"source"`
|
||||
Verification Verification `yaml:"verification"`
|
||||
}
|
||||
29
vendor/github.com/coreos/fuze/config/types/disks.go
generated
vendored
Normal file
29
vendor/github.com/coreos/fuze/config/types/disks.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type Disk struct {
|
||||
Device string `yaml:"device"`
|
||||
WipeTable bool `yaml:"wipe_table"`
|
||||
Partitions []Partition `yaml:"partitions"`
|
||||
}
|
||||
|
||||
type Partition struct {
|
||||
Label string `yaml:"label"`
|
||||
Number int `yaml:"number"`
|
||||
Size string `yaml:"size"`
|
||||
Start string `yaml:"start"`
|
||||
TypeGUID string `yaml:"type_guid"`
|
||||
}
|
||||
43
vendor/github.com/coreos/fuze/config/types/files.go
generated
vendored
Normal file
43
vendor/github.com/coreos/fuze/config/types/files.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type File struct {
|
||||
Filesystem string `yaml:"filesystem"`
|
||||
Path string `yaml:"path"`
|
||||
Mode int `yaml:"mode"`
|
||||
Contents FileContents `yaml:"contents"`
|
||||
User FileUser `yaml:"user"`
|
||||
Group FileGroup `yaml:"group"`
|
||||
}
|
||||
|
||||
type FileContents struct {
|
||||
Remote Remote `yaml:"remote"`
|
||||
Inline string `yaml:"inline"`
|
||||
}
|
||||
|
||||
type Remote struct {
|
||||
Url string `yaml:"url"`
|
||||
Compression string `yaml:"compression"`
|
||||
Verification Verification `yaml:"verification"`
|
||||
}
|
||||
|
||||
type FileUser struct {
|
||||
Id int `yaml:"id"`
|
||||
}
|
||||
|
||||
type FileGroup struct {
|
||||
Id int `yaml:"id"`
|
||||
}
|
||||
32
vendor/github.com/coreos/fuze/config/types/filesystems.go
generated
vendored
Normal file
32
vendor/github.com/coreos/fuze/config/types/filesystems.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type Filesystem struct {
|
||||
Name string `yaml:"name"`
|
||||
Mount *Mount `yaml:"mount"`
|
||||
Path string `yaml:"path"`
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
Device string `yaml:"device"`
|
||||
Format string `yaml:"format"`
|
||||
Create *Create `yaml:"create"`
|
||||
}
|
||||
|
||||
type Create struct {
|
||||
Force bool `yaml:"force"`
|
||||
Options []string `yaml:"options"`
|
||||
}
|
||||
24
vendor/github.com/coreos/fuze/config/types/networkd.go
generated
vendored
Normal file
24
vendor/github.com/coreos/fuze/config/types/networkd.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type Networkd struct {
|
||||
Units []NetworkdUnit `yaml:"units"`
|
||||
}
|
||||
|
||||
type NetworkdUnit struct {
|
||||
Name string `yaml:"name"`
|
||||
Contents string `yaml:"contents"`
|
||||
}
|
||||
47
vendor/github.com/coreos/fuze/config/types/passwd.go
generated
vendored
Normal file
47
vendor/github.com/coreos/fuze/config/types/passwd.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type Passwd struct {
|
||||
Users []User `yaml:"users"`
|
||||
Groups []Group `yaml:"groups"`
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Name string `yaml:"name"`
|
||||
PasswordHash string `yaml:"password_hash"`
|
||||
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
|
||||
Create *UserCreate `yaml:"create"`
|
||||
}
|
||||
|
||||
type UserCreate struct {
|
||||
Uid *uint `yaml:"uid"`
|
||||
GECOS string `yaml:"gecos"`
|
||||
Homedir string `yaml:"home_dir"`
|
||||
NoCreateHome bool `yaml:"no_create_home"`
|
||||
PrimaryGroup string `yaml:"primary_group"`
|
||||
Groups []string `yaml:"groups"`
|
||||
NoUserGroup bool `yaml:"no_user_group"`
|
||||
System bool `yaml:"system"`
|
||||
NoLogInit bool `yaml:"no_log_init"`
|
||||
Shell string `yaml:"shell"`
|
||||
}
|
||||
|
||||
type Group struct {
|
||||
Name string `yaml:"name"`
|
||||
Gid *uint `yaml:"gid"`
|
||||
PasswordHash string `yaml:"password_hash"`
|
||||
System bool `yaml:"system"`
|
||||
}
|
||||
22
vendor/github.com/coreos/fuze/config/types/raid.go
generated
vendored
Normal file
22
vendor/github.com/coreos/fuze/config/types/raid.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type Raid struct {
|
||||
Name string `yaml:"name"`
|
||||
Level string `yaml:"level"`
|
||||
Devices []string `yaml:"devices"`
|
||||
Spares int `yaml:"spares"`
|
||||
}
|
||||
22
vendor/github.com/coreos/fuze/config/types/storage.go
generated
vendored
Normal file
22
vendor/github.com/coreos/fuze/config/types/storage.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type Storage struct {
|
||||
Disks []Disk `yaml:"disks"`
|
||||
Arrays []Raid `yaml:"raid"`
|
||||
Filesystems []Filesystem `yaml:"filesystems"`
|
||||
Files []File `yaml:"files"`
|
||||
}
|
||||
32
vendor/github.com/coreos/fuze/config/types/systemd.go
generated
vendored
Normal file
32
vendor/github.com/coreos/fuze/config/types/systemd.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
type Systemd struct {
|
||||
Units []SystemdUnit `yaml:"units"`
|
||||
}
|
||||
|
||||
type SystemdUnit struct {
|
||||
Name string `yaml:"name"`
|
||||
Enable bool `yaml:"enable"`
|
||||
Mask bool `yaml:"mask"`
|
||||
Contents string `yaml:"contents"`
|
||||
DropIns []SystemdUnitDropIn `yaml:"dropins"`
|
||||
}
|
||||
|
||||
type SystemdUnitDropIn struct {
|
||||
Name string `yaml:"name"`
|
||||
Contents string `yaml:"contents"`
|
||||
}
|
||||
24
vendor/github.com/coreos/fuze/config/types/verification.go
generated
vendored
Normal file
24
vendor/github.com/coreos/fuze/config/types/verification.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

type Verification struct {
Hash Hash `yaml:"hash"`
}

type Hash struct {
Function string `yaml:"function"`
Sum string `yaml:"sum"`
}
63
vendor/github.com/coreos/fuze/config/validate.go
generated
vendored
63
vendor/github.com/coreos/fuze/config/validate.go
generated
vendored
@@ -1,63 +0,0 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
"fmt"
"reflect"
)

type ErrKeysUnrecognized []string

func (e ErrKeysUnrecognized) Error() string {
return fmt.Sprintf("unrecognized keys: %v", []string(e))
}

func assertKeysValid(value interface{}, refType reflect.Type) ErrKeysUnrecognized {
var err ErrKeysUnrecognized

if refType.Kind() == reflect.Ptr {
refType = refType.Elem()
}
switch value.(type) {
case map[interface{}]interface{}:
ks := value.(map[interface{}]interface{})
keys:
for key := range ks {
for i := 0; i < refType.NumField(); i++ {
sf := refType.Field(i)
tv := sf.Tag.Get("yaml")
if tv == key {
if serr := assertKeysValid(ks[key], sf.Type); serr != nil {
err = append(err, serr...)
}
continue keys
}
}

err = append(err, fmt.Sprintf("%v", key))
}
case []interface{}:
ks := value.([]interface{})
for i := range ks {
if serr := assertKeysValid(ks[i], refType.Elem()); serr != nil {
err = append(err, serr...)
}
}
default:
}

return err
}
3
vendor/github.com/coreos/fuze/config/vendor.manifest
generated
vendored
3
vendor/github.com/coreos/fuze/config/vendor.manifest
generated
vendored
@@ -1,5 +1,6 @@
# If you manipulate the contents of vendor/, amend this accordingly.
# pkg version
github.com/ajeddeloh/yaml 1072abfea31191db507785e2e0c1b8d1440d35a5
github.com/alecthomas/units 6b4e7dc5e3143b85ea77909c72caf89416fc2915
github.com/coreos/ignition/config b6850837b3b9bd17b673e58b5c406b5e4192ddca
github.com/coreos/ignition/config 3ffd793b1292c6b0b3519bce214bdb41f336faa7
github.com/vincent-petithory/dataurl 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
14
vendor/github.com/coreos/fuze/internal/main.go
generated
vendored
14
vendor/github.com/coreos/fuze/internal/main.go
generated
vendored
@@ -56,9 +56,17 @@ func main() {
os.Exit(1)
}

cfg, err := config.ParseAsV2_0_0(dataIn)
if err != nil {
stderr("Failed to parse: %v", err)
fuzeCfg, report := config.Parse(dataIn)
stderr(report.String())
if report.IsFatal() {
stderr("Failed to parse fuze config")
os.Exit(1)
}

cfg, report := config.ConvertAs2_0_0(fuzeCfg)
stderr(report.String())
if report.IsFatal() {
stderr("Generated Ignition config was invalid.")
os.Exit(1)
}

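The hunk above swaps the old single-error ParseAsV2_0_0 call for the report-based API. For orientation, a minimal sketch of how a standalone caller might drive that API; this is illustrative only and not part of the commit, it assumes the vendored import path github.com/coreos/fuze/config shown in this diff and reads the fuze config from stdin:

// Illustrative sketch only: driving the report-based fuze config API.
package main

import (
    "fmt"
    "io/ioutil"
    "os"

    "github.com/coreos/fuze/config"
)

func main() {
    dataIn, err := ioutil.ReadAll(os.Stdin)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Failed to read stdin: %v\n", err)
        os.Exit(1)
    }

    // Parse now returns the fuze config plus a validation report instead of a bare error.
    fuzeCfg, report := config.Parse(dataIn)
    fmt.Fprintln(os.Stderr, report.String())
    if report.IsFatal() {
        fmt.Fprintln(os.Stderr, "Failed to parse fuze config")
        os.Exit(1)
    }

    // Conversion to an Ignition 2.0.0 config is checked through the same report type.
    ignCfg, report := config.ConvertAs2_0_0(fuzeCfg)
    fmt.Fprintln(os.Stderr, report.String())
    if report.IsFatal() {
        fmt.Fprintln(os.Stderr, "Generated Ignition config was invalid.")
        os.Exit(1)
    }

    fmt.Printf("%+v\n", ignCfg)
}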
12
vendor/github.com/coreos/ignition/.travis.yml
generated
vendored
12
vendor/github.com/coreos/ignition/.travis.yml
generated
vendored
@@ -5,15 +5,16 @@ language: go
go_import_path: github.com/coreos/ignition

go:
- 1.5
- 1.6
- 1.5.4
- 1.6.3
- 1.7

env:
global:
- GO15VENDOREXPERIMENT=1
matrix:
- TARGET=amd64
- TARGET=arm64
- TARGET=arm64 GIMME_ARCH=arm64 GIMME_CGO_ENABLED=1

addons:
apt:
@@ -27,9 +28,8 @@ install:

script:
- if [ "${TARGET}" == "amd64" ]; then
GOARCH="${TARGET}" ./test;
GOARCH="${TARGET}" ./test;
elif [ "${TARGET}" == "arm64" ]; then
eval "$(GIMME_ARCH=${TARGET} GIMME_CGO_ENABLED=1 ./gimme.local ${TRAVIS_GO_VERSION})";
GOARCH="${TARGET}" ./build;
file "bin/${TARGET}/ignition" | egrep 'aarch64';
file "bin/${TARGET}/ignition" | egrep 'aarch64';
fi

79
vendor/github.com/coreos/ignition/NEWS
generated
vendored
79
vendor/github.com/coreos/ignition/NEWS
generated
vendored
@@ -1,3 +1,82 @@
07-Oct-2016 IGNITION v0.11.2

Bug Fixes

- Correctly set the partition typecode

Changes

- Update the services for the GCE OEM

20-Sep-2016 IGNITION v0.11.1

Bug Fixes

- Fix potential deadlock when waiting for multiple disks

07-Sep-2016 IGNITION v0.11.0

Features

- Add support for DigitalOcean
- Add experimental support for OpenStack

26-Aug-2016 IGNITION v0.10.1

Bug Fixes

- Fix handling of oem:// URLs
- Use stable symlinks when operating on devices
- Retry failed requests when fetching Packet userdata
- Log the raw configurations instead of the parsed result

23-Aug-2016 IGNITION v0.10.0

Features

- Add support for QEMU Firmware Configuration Device

15-Aug-2016 IGNITION v0.9.2

Bug Fixes

- Do not retry HTTP requests that result in non-5xx status codes

11-Aug-2016 IGNITION v0.9.1

Bug Fixes

- Properly validate data URLs

11-Aug-2016 IGNITION v0.9.0

Features

- Add detailed configuration validation

Bug Fixes

- Add retry to all HTTP requests
- Fix potential panic when parsing certain URLs

26-Jul-2016 IGNITION v0.8.0

Features

- Add support for Packet

13-Jul-2016 IGNITION v0.7.1

Bug Fixes

- Interpret files without a URL to be empty instead of invalid
- HTTP fetches time out while waiting for response header instead of body
- Stream remote assets to disk instead of loading them into memory

Changes

- Improve configuration validation

15-Jun-2016 IGNITION v0.7.0

Features

7
vendor/github.com/coreos/ignition/README.md
generated
vendored
7
vendor/github.com/coreos/ignition/README.md
generated
vendored
@@ -6,10 +6,7 @@ Ignition is the utility used by CoreOS Linux to manipulate disks during the init

Odds are good that you don't want to invoke Ignition directly. In fact, it isn't even present in the CoreOS Linux root filesystem. Take a look at the [Getting Started Guide][getting started] for details on providing Ignition with a runtime configuration.

Use the [bug tracker][issues] to report bugs.

[getting started]: doc/getting-started.md

**Ignition is under very active development!**

Use the [bug tracker][issues] to report bugs, but please avoid the urge to report lack of features for now.

[issues]: https://github.com/coreos/bugs/issues/new?labels=component/ignition

99
vendor/github.com/coreos/ignition/config/config.go
generated
vendored
99
vendor/github.com/coreos/ignition/config/config.go
generated
vendored
@@ -16,13 +16,16 @@ package config

import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"

"github.com/coreos/ignition/config/types"
"github.com/coreos/ignition/config/v1"
"github.com/coreos/ignition/config/validate"
astjson "github.com/coreos/ignition/config/validate/astjson"
"github.com/coreos/ignition/config/validate/report"

json "github.com/ajeddeloh/go-json"
"go4.org/errorutil"
)

@@ -31,38 +34,100 @@ var (
ErrEmpty = errors.New("not a config (empty)")
ErrScript = errors.New("not a config (found coreos-cloudinit script)")
ErrDeprecated = errors.New("config format deprecated")
ErrInvalid = errors.New("config is not valid")
)

func Parse(rawConfig []byte) (types.Config, error) {
// Parse parses the raw config into a types.Config struct and generates a report of any
// errors, warnings, info, and deprecations it encountered
func Parse(rawConfig []byte) (types.Config, report.Report, error) {
switch majorVersion(rawConfig) {
case 1:
config, err := ParseFromV1(rawConfig)
if err != nil {
return types.Config{}, err
return types.Config{}, report.ReportFromError(err, report.EntryError), err
}

return config, ErrDeprecated
return config, report.ReportFromError(ErrDeprecated, report.EntryDeprecated), nil
default:
return ParseFromLatest(rawConfig)
}
}

func ParseFromLatest(rawConfig []byte) (config types.Config, err error) {
if err = json.Unmarshal(rawConfig, &config); err == nil {
err = config.Ignition.Version.AssertValid()
} else if isEmpty(rawConfig) {
err = ErrEmpty
func ParseFromLatest(rawConfig []byte) (types.Config, report.Report, error) {
if isEmpty(rawConfig) {
return types.Config{}, report.Report{}, ErrEmpty
} else if isCloudConfig(rawConfig) {
err = ErrCloudConfig
return types.Config{}, report.Report{}, ErrCloudConfig
} else if isScript(rawConfig) {
err = ErrScript
}
if serr, ok := err.(*json.SyntaxError); ok {
line, col, highlight := errorutil.HighlightBytePosition(bytes.NewReader(rawConfig), serr.Offset)
err = fmt.Errorf("error at line %d, column %d\n%s%v", line, col, highlight, err)
return types.Config{}, report.Report{}, ErrScript
}

return
var err error
var config types.Config

// These errors are fatal and the config should not be further validated
if err = json.Unmarshal(rawConfig, &config); err == nil {
versionReport := config.Ignition.Version.Validate()
if versionReport.IsFatal() {
return types.Config{}, versionReport, ErrInvalid
}
}

// Handle json syntax and type errors first, since they are fatal but have offset info
if serr, ok := err.(*json.SyntaxError); ok {
line, col, highlight := errorutil.HighlightBytePosition(bytes.NewReader(rawConfig), serr.Offset)
return types.Config{},
report.Report{
Entries: []report.Entry{{
Kind: report.EntryError,
Message: serr.Error(),
Line: line,
Column: col,
Highlight: highlight,
}},
},
ErrInvalid
}

if terr, ok := err.(*json.UnmarshalTypeError); ok {
line, col, highlight := errorutil.HighlightBytePosition(bytes.NewReader(rawConfig), terr.Offset)
return types.Config{},
report.Report{
Entries: []report.Entry{{
Kind: report.EntryError,
Message: terr.Error(),
Line: line,
Column: col,
Highlight: highlight,
}},
},
ErrInvalid
}

// Handle other fatal errors (i.e. invalid version)
if err != nil {
return types.Config{}, report.ReportFromError(err, report.EntryError), err
}

// Unmarshal again to a json.Node to get offset information for building a report
var ast json.Node
var r report.Report
configValue := reflect.ValueOf(config)
if err := json.Unmarshal(rawConfig, &ast); err != nil {
r.Add(report.Entry{
Kind: report.EntryWarning,
Message: "Ignition could not unmarshal your config for reporting line numbers. This should never happen. Please file a bug.",
})
r.Merge(validate.ValidateWithoutSource(configValue))
} else {
r.Merge(validate.Validate(configValue, astjson.FromJsonRoot(ast), bytes.NewReader(rawConfig)))
}

if r.IsFatal() {
return types.Config{}, r, ErrInvalid
}

return config, r, nil
}

func ParseFromV1(rawConfig []byte) (types.Config, error) {

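The rewritten Parse above now returns a validation report alongside the config and a coarse error. A short, illustrative sketch of how a consumer such as matchbox might call it; this is not part of the commit and assumes the vendored import path github.com/coreos/ignition/config from this diff:

// Illustrative sketch only: consuming the new (Config, Report, error) signature.
package main

import (
    "fmt"
    "os"

    ignition "github.com/coreos/ignition/config"
)

func main() {
    raw := []byte(`{"ignition": {"version": "2.0.0"}}`)

    cfg, report, err := ignition.Parse(raw)
    // The report carries errors, warnings, and deprecation notices,
    // with line/column information where the JSON offsets allow it.
    if len(report.Entries) > 0 {
        fmt.Fprintln(os.Stderr, report.String())
    }
    if err != nil {
        os.Exit(1)
    }

    fmt.Printf("parsed Ignition config, version %+v\n", cfg.Ignition.Version)
}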
8
vendor/github.com/coreos/ignition/config/config_test.go
generated
vendored
8
vendor/github.com/coreos/ignition/config/config_test.go
generated
vendored
@@ -37,7 +37,7 @@ func TestParse(t *testing.T) {
}{
{
in: in{config: []byte(`{"ignitionVersion": 1}`)},
out: out{err: ErrDeprecated},
out: out{config: types.Config{Ignition: types.Ignition{Version: types.IgnitionVersion{Major: 2, Minor: 0}}}},
},
{
in: in{config: []byte(`{"ignition": {"version": "1.0.0"}}`)},
@@ -49,11 +49,11 @@ func TestParse(t *testing.T) {
},
{
in: in{config: []byte(`{"ignition": {"version": "2.1.0"}}`)},
out: out{err: types.ErrNewVersion},
out: out{err: ErrInvalid},
},
{
in: in{config: []byte(`{}`)},
out: out{err: types.ErrOldVersion},
out: out{err: ErrInvalid},
},
{
in: in{config: []byte{}},
@@ -91,7 +91,7 @@ func TestParse(t *testing.T) {
}

for i, test := range tests {
config, err := Parse(test.in.config)
config, _, err := Parse(test.in.config)
if test.out.err != err {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
}

24
vendor/github.com/coreos/ignition/config/types/compression.go
generated
vendored
24
vendor/github.com/coreos/ignition/config/types/compression.go
generated
vendored
@@ -15,8 +15,9 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -25,26 +26,11 @@ var (
|
||||
|
||||
type Compression string
|
||||
|
||||
func (c *Compression) UnmarshalJSON(data []byte) error {
|
||||
return c.unmarshal(func(tc interface{}) error {
|
||||
return json.Unmarshal(data, tc)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Compression) unmarshal(unmarshal func(interface{}) error) error {
|
||||
var tc string
|
||||
if err := unmarshal(&tc); err != nil {
|
||||
return err
|
||||
}
|
||||
*c = Compression(tc)
|
||||
return c.AssertValid()
|
||||
}
|
||||
|
||||
func (c Compression) AssertValid() error {
|
||||
func (c Compression) Validate() report.Report {
|
||||
switch c {
|
||||
case "", "gzip":
|
||||
default:
|
||||
return ErrCompressionInvalid
|
||||
return report.ReportFromError(ErrCompressionInvalid, report.EntryError)
|
||||
}
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
73
vendor/github.com/coreos/ignition/config/types/config.go
generated
vendored
73
vendor/github.com/coreos/ignition/config/types/config.go
generated
vendored
@@ -15,9 +15,11 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"fmt"
|
||||
|
||||
"github.com/coreos/go-semver/semver"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -35,44 +37,51 @@ type Config struct {
|
||||
Passwd Passwd `json:"passwd,omitempty"`
|
||||
}
|
||||
|
||||
func (c Config) AssertValid() error {
|
||||
return assertStructValid(reflect.ValueOf(c))
|
||||
func (c Config) Validate() report.Report {
|
||||
r := report.Report{}
|
||||
rules := []rule{
|
||||
checkFilesFilesystems,
|
||||
checkDuplicateFilesystems,
|
||||
}
|
||||
|
||||
for _, rule := range rules {
|
||||
rule(c, &r)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func assertValid(vObj reflect.Value) error {
|
||||
if !vObj.IsValid() {
|
||||
return nil
|
||||
}
|
||||
type rule func(cfg Config, report *report.Report)
|
||||
|
||||
if obj, ok := vObj.Interface().(interface {
|
||||
AssertValid() error
|
||||
}); ok && !(vObj.Kind() == reflect.Ptr && vObj.IsNil()) {
|
||||
if err := obj.AssertValid(); err != nil {
|
||||
return err
|
||||
func checkFilesFilesystems(cfg Config, r *report.Report) {
|
||||
filesystems := map[string]struct{}{"root": {}}
|
||||
for _, filesystem := range cfg.Storage.Filesystems {
|
||||
filesystems[filesystem.Name] = struct{}{}
|
||||
}
|
||||
for _, file := range cfg.Storage.Files {
|
||||
if file.Filesystem == "" {
|
||||
// Filesystem was not specified. This is an error, but its handled in types.File's Validate, not here
|
||||
continue
|
||||
}
|
||||
_, ok := filesystems[file.Filesystem]
|
||||
if !ok {
|
||||
r.Add(report.Entry{
|
||||
Kind: report.EntryWarning,
|
||||
Message: fmt.Sprintf("File %q references nonexistent filesystem %q. (This is ok if it is defined in a referenced config)",
|
||||
file.Path, file.Filesystem),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
switch vObj.Kind() {
|
||||
case reflect.Ptr:
|
||||
return assertValid(vObj.Elem())
|
||||
case reflect.Struct:
|
||||
return assertStructValid(vObj)
|
||||
case reflect.Slice:
|
||||
for i := 0; i < vObj.Len(); i++ {
|
||||
if err := assertValid(vObj.Index(i)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func assertStructValid(vObj reflect.Value) error {
|
||||
for i := 0; i < vObj.Type().NumField(); i++ {
|
||||
if err := assertValid(vObj.Field(i)); err != nil {
|
||||
return err
|
||||
func checkDuplicateFilesystems(cfg Config, r *report.Report) {
|
||||
filesystems := map[string]struct{}{"root": {}}
|
||||
for _, filesystem := range cfg.Storage.Filesystems {
|
||||
if _, ok := filesystems[filesystem.Name]; ok {
|
||||
r.Add(report.Entry{
|
||||
Kind: report.EntryWarning,
|
||||
Message: fmt.Sprintf("Filesystem %q shadows exising filesystem definition", filesystem.Name),
|
||||
})
|
||||
}
|
||||
filesystems[filesystem.Name] = struct{}{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
40
vendor/github.com/coreos/ignition/config/types/disk.go
generated
vendored
40
vendor/github.com/coreos/ignition/config/types/disk.go
generated
vendored
@@ -15,8 +15,9 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
type Disk struct {
|
||||
@@ -24,32 +25,35 @@ type Disk struct {
|
||||
WipeTable bool `json:"wipeTable,omitempty"`
|
||||
Partitions []Partition `json:"partitions,omitempty"`
|
||||
}
|
||||
type disk Disk
|
||||
|
||||
func (n *Disk) UnmarshalJSON(data []byte) error {
|
||||
tn := disk(*n)
|
||||
if err := json.Unmarshal(data, &tn); err != nil {
|
||||
return err
|
||||
}
|
||||
*n = Disk(tn)
|
||||
return n.AssertValid()
|
||||
}
|
||||
|
||||
func (n Disk) AssertValid() error {
|
||||
func (n Disk) Validate() report.Report {
|
||||
r := report.Report{}
|
||||
if len(n.Device) == 0 {
|
||||
return fmt.Errorf("disk device is required")
|
||||
r.Add(report.Entry{
|
||||
Message: "disk device is required",
|
||||
Kind: report.EntryError,
|
||||
})
|
||||
}
|
||||
if n.partitionNumbersCollide() {
|
||||
return fmt.Errorf("disk %q: partition numbers collide", n.Device)
|
||||
r.Add(report.Entry{
|
||||
Message: fmt.Sprintf("disk %q: partition numbers collide", n.Device),
|
||||
Kind: report.EntryError,
|
||||
})
|
||||
}
|
||||
if n.partitionsOverlap() {
|
||||
return fmt.Errorf("disk %q: partitions overlap", n.Device)
|
||||
r.Add(report.Entry{
|
||||
Message: fmt.Sprintf("disk %q: partitions overlap", n.Device),
|
||||
Kind: report.EntryError,
|
||||
})
|
||||
}
|
||||
if n.partitionsMisaligned() {
|
||||
return fmt.Errorf("disk %q: partitions misaligned", n.Device)
|
||||
r.Add(report.Entry{
|
||||
Message: fmt.Sprintf("disk %q: partitions misaligned", n.Device),
|
||||
Kind: report.EntryError,
|
||||
})
|
||||
}
|
||||
// Disks which get to this point will likely succeed in sgdisk
|
||||
return nil
|
||||
// Disks which have no errors at this point will likely succeed in sgdisk
|
||||
return r
|
||||
}
|
||||
|
||||
// partitionNumbersCollide returns true if partition numbers in n.Partitions are not unique.
|
||||
|
||||
27
vendor/github.com/coreos/ignition/config/types/file.go
generated
vendored
27
vendor/github.com/coreos/ignition/config/types/file.go
generated
vendored
@@ -15,13 +15,15 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFileIllegalMode = errors.New("illegal file mode")
|
||||
ErrNoFilesystem = errors.New("no filesystem specified")
|
||||
)
|
||||
|
||||
type File struct {
|
||||
@@ -33,6 +35,13 @@ type File struct {
|
||||
Group FileGroup `json:"group,omitempty"`
|
||||
}
|
||||
|
||||
func (f File) Validate() report.Report {
|
||||
if f.Filesystem == "" {
|
||||
return report.ReportFromError(ErrNoFilesystem, report.EntryError)
|
||||
}
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
type FileUser struct {
|
||||
Id int `json:"id,omitempty"`
|
||||
}
|
||||
@@ -48,20 +57,10 @@ type FileContents struct {
|
||||
}
|
||||
|
||||
type FileMode os.FileMode
|
||||
type fileMode FileMode
|
||||
|
||||
func (m *FileMode) UnmarshalJSON(data []byte) error {
|
||||
tm := fileMode(*m)
|
||||
if err := json.Unmarshal(data, &tm); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = FileMode(tm)
|
||||
return m.AssertValid()
|
||||
}
|
||||
|
||||
func (m FileMode) AssertValid() error {
|
||||
func (m FileMode) Validate() report.Report {
|
||||
if (m &^ 07777) != 0 {
|
||||
return ErrFileIllegalMode
|
||||
return report.ReportFromError(ErrFileIllegalMode, report.EntryError)
|
||||
}
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
44
vendor/github.com/coreos/ignition/config/types/file_test.go
generated
vendored
44
vendor/github.com/coreos/ignition/config/types/file_test.go
generated
vendored
@@ -15,47 +15,13 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
func TestFileModeUnmarshalJSON(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
}
|
||||
type out struct {
|
||||
mode FileMode
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
in in
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: `420`},
|
||||
out: out{mode: FileMode(420)},
|
||||
},
|
||||
{
|
||||
in: in{data: `9999`},
|
||||
out: out{mode: FileMode(9999), err: ErrFileIllegalMode},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
var mode FileMode
|
||||
err := json.Unmarshal([]byte(test.in.data), &mode)
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.out.mode, mode) {
|
||||
t.Errorf("#%d: bad mode: want %#o, got %#o", i, test.out.mode, mode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileAssertValid(t *testing.T) {
|
||||
func TestFileValidate(t *testing.T) {
|
||||
type in struct {
|
||||
mode FileMode
|
||||
}
|
||||
@@ -90,8 +56,8 @@ func TestFileAssertValid(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
err := test.in.mode.AssertValid()
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
err := test.in.mode.Validate()
|
||||
if !reflect.DeepEqual(report.ReportFromError(test.out.err, report.EntryError), err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
93
vendor/github.com/coreos/ignition/config/types/filesystem.go
generated
vendored
93
vendor/github.com/coreos/ignition/config/types/filesystem.go
generated
vendored
@@ -15,8 +15,9 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -30,7 +31,6 @@ type Filesystem struct {
|
||||
Mount *FilesystemMount `json:"mount,omitempty"`
|
||||
Path *Path `json:"path,omitempty"`
|
||||
}
|
||||
type filesystem Filesystem
|
||||
|
||||
type FilesystemMount struct {
|
||||
Device Path `json:"device,omitempty"`
|
||||
@@ -43,96 +43,25 @@ type FilesystemCreate struct {
|
||||
Options MkfsOptions `json:"options,omitempty"`
|
||||
}
|
||||
|
||||
func (f *Filesystem) UnmarshalJSON(data []byte) error {
|
||||
tf := filesystem(*f)
|
||||
if err := json.Unmarshal(data, &tf); err != nil {
|
||||
return err
|
||||
func (f Filesystem) Validate() report.Report {
|
||||
if f.Mount == nil && f.Path == nil {
|
||||
return report.ReportFromError(ErrFilesystemNoMountPath, report.EntryError)
|
||||
}
|
||||
*f = Filesystem(tf)
|
||||
return f.AssertValid()
|
||||
}
|
||||
|
||||
func (f Filesystem) AssertValid() error {
|
||||
hasMount := false
|
||||
hasPath := false
|
||||
|
||||
if f.Mount != nil {
|
||||
hasMount = true
|
||||
if err := f.Mount.AssertValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
if f.Mount != nil && f.Path != nil {
|
||||
return report.ReportFromError(ErrFilesystemMountAndPath, report.EntryError)
|
||||
}
|
||||
|
||||
if f.Path != nil {
|
||||
hasPath = true
|
||||
if err := f.Path.AssertValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !hasMount && !hasPath {
|
||||
return ErrFilesystemNoMountPath
|
||||
} else if hasMount && hasPath {
|
||||
return ErrFilesystemMountAndPath
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type filesystemMount FilesystemMount
|
||||
|
||||
func (f *FilesystemMount) UnmarshalJSON(data []byte) error {
|
||||
tf := filesystemMount(*f)
|
||||
if err := json.Unmarshal(data, &tf); err != nil {
|
||||
return err
|
||||
}
|
||||
*f = FilesystemMount(tf)
|
||||
return f.AssertValid()
|
||||
}
|
||||
|
||||
func (f FilesystemMount) AssertValid() error {
|
||||
if err := f.Device.AssertValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.Format.AssertValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
type FilesystemFormat string
|
||||
type filesystemFormat FilesystemFormat
|
||||
|
||||
func (f *FilesystemFormat) UnmarshalJSON(data []byte) error {
|
||||
tf := filesystemFormat(*f)
|
||||
if err := json.Unmarshal(data, &tf); err != nil {
|
||||
return err
|
||||
}
|
||||
*f = FilesystemFormat(tf)
|
||||
return f.AssertValid()
|
||||
}
|
||||
|
||||
func (f FilesystemFormat) AssertValid() error {
|
||||
func (f FilesystemFormat) Validate() report.Report {
|
||||
switch f {
|
||||
case "ext4", "btrfs", "xfs":
|
||||
return nil
|
||||
return report.Report{}
|
||||
default:
|
||||
return ErrFilesystemInvalidFormat
|
||||
return report.ReportFromError(ErrFilesystemInvalidFormat, report.EntryError)
|
||||
}
|
||||
}
|
||||
|
||||
type MkfsOptions []string
|
||||
type mkfsOptions MkfsOptions
|
||||
|
||||
func (o *MkfsOptions) UnmarshalJSON(data []byte) error {
|
||||
to := mkfsOptions(*o)
|
||||
if err := json.Unmarshal(data, &to); err != nil {
|
||||
return err
|
||||
}
|
||||
*o = MkfsOptions(to)
|
||||
return o.AssertValid()
|
||||
}
|
||||
|
||||
func (o MkfsOptions) AssertValid() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
128
vendor/github.com/coreos/ignition/config/types/filesystem_test.go
generated
vendored
128
vendor/github.com/coreos/ignition/config/types/filesystem_test.go
generated
vendored
@@ -15,47 +15,13 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
func TestFilesystemFormatUnmarshalJSON(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
}
|
||||
type out struct {
|
||||
format FilesystemFormat
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
in in
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: `"ext4"`},
|
||||
out: out{format: FilesystemFormat("ext4")},
|
||||
},
|
||||
{
|
||||
in: in{data: `"bad"`},
|
||||
out: out{format: FilesystemFormat("bad"), err: ErrFilesystemInvalidFormat},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
var format FilesystemFormat
|
||||
err := json.Unmarshal([]byte(test.in.data), &format)
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.out.format, format) {
|
||||
t.Errorf("#%d: bad format: want %#v, got %#v", i, test.out.format, format)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilesystemFormatAssertValid(t *testing.T) {
|
||||
func TestFilesystemFormatValidate(t *testing.T) {
|
||||
type in struct {
|
||||
format FilesystemFormat
|
||||
}
|
||||
@@ -82,80 +48,14 @@ func TestFilesystemFormatAssertValid(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
err := test.in.format.AssertValid()
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
err := test.in.format.Validate()
|
||||
if !reflect.DeepEqual(report.ReportFromError(test.out.err, report.EntryError), err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMkfsOptionsUnmarshalJSON(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
}
|
||||
type out struct {
|
||||
options MkfsOptions
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
in in
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: `["--label=ROOT"]`},
|
||||
out: out{options: MkfsOptions([]string{"--label=ROOT"})},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
var options MkfsOptions
|
||||
err := json.Unmarshal([]byte(test.in.data), &options)
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.out.options, options) {
|
||||
t.Errorf("#%d: bad format: want %#v, got %#v", i, test.out.options, options)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilesystemUnmarshalJSON(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
}
|
||||
type out struct {
|
||||
filesystem Filesystem
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
in in
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: `{"mount": {"device": "/foo", "format": "ext4"}}`},
|
||||
out: out{filesystem: Filesystem{Mount: &FilesystemMount{Device: "/foo", Format: "ext4"}}},
|
||||
},
|
||||
{
|
||||
in: in{data: `{"mount": {"format": "ext4"}}`},
|
||||
out: out{err: ErrPathRelative},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
var filesystem Filesystem
|
||||
err := json.Unmarshal([]byte(test.in.data), &filesystem)
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.out.filesystem, filesystem) {
|
||||
t.Errorf("#%d: bad filesystem: want %#v, got %#v", i, test.out.filesystem, filesystem)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilesystemAssertValid(t *testing.T) {
|
||||
func TestFilesystemValidate(t *testing.T) {
|
||||
type in struct {
|
||||
filesystem Filesystem
|
||||
}
|
||||
@@ -171,22 +71,10 @@ func TestFilesystemAssertValid(t *testing.T) {
|
||||
in: in{filesystem: Filesystem{Mount: &FilesystemMount{Device: "/foo", Format: "ext4"}}},
|
||||
out: out{},
|
||||
},
|
||||
{
|
||||
in: in{filesystem: Filesystem{Mount: &FilesystemMount{Device: "/foo"}}},
|
||||
out: out{err: ErrFilesystemInvalidFormat},
|
||||
},
|
||||
{
|
||||
in: in{filesystem: Filesystem{Mount: &FilesystemMount{Format: "ext4"}}},
|
||||
out: out{err: ErrPathRelative},
|
||||
},
|
||||
{
|
||||
in: in{filesystem: Filesystem{Path: func(p Path) *Path { return &p }("/mount")}},
|
||||
out: out{},
|
||||
},
|
||||
{
|
||||
in: in{filesystem: Filesystem{Path: func(p Path) *Path { return &p }("mount")}},
|
||||
out: out{err: ErrPathRelative},
|
||||
},
|
||||
{
|
||||
in: in{filesystem: Filesystem{Path: func(p Path) *Path { return &p }("/mount"), Mount: &FilesystemMount{Device: "/foo", Format: "ext4"}}},
|
||||
out: out{err: ErrFilesystemMountAndPath},
|
||||
@@ -198,8 +86,8 @@ func TestFilesystemAssertValid(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
err := test.in.filesystem.AssertValid()
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
err := test.in.filesystem.Validate()
|
||||
if !reflect.DeepEqual(report.ReportFromError(test.out.err, report.EntryError), err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
12
vendor/github.com/coreos/ignition/config/types/hash.go
generated
vendored
12
vendor/github.com/coreos/ignition/config/types/hash.go
generated
vendored
@@ -20,6 +20,8 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -47,25 +49,25 @@ func (h *Hash) UnmarshalJSON(data []byte) error {
|
||||
h.Function = parts[0]
|
||||
h.Sum = parts[1]
|
||||
|
||||
return h.AssertValid()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h Hash) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + h.Function + "-" + h.Sum + `"`), nil
|
||||
}
|
||||
|
||||
func (h Hash) AssertValid() error {
|
||||
func (h Hash) Validate() report.Report {
|
||||
var hash crypto.Hash
|
||||
switch h.Function {
|
||||
case "sha512":
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrHashUnrecognized
|
||||
return report.ReportFromError(ErrHashUnrecognized, report.EntryError)
|
||||
}
|
||||
|
||||
if len(h.Sum) != hex.EncodedLen(hash.Size()) {
|
||||
return ErrHashWrongSize
|
||||
return report.ReportFromError(ErrHashWrongSize, report.EntryError)
|
||||
}
|
||||
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
8
vendor/github.com/coreos/ignition/config/types/hash_test.go
generated
vendored
8
vendor/github.com/coreos/ignition/config/types/hash_test.go
generated
vendored
@@ -18,6 +18,8 @@ import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
func TestHashUnmarshalJSON(t *testing.T) {
|
||||
@@ -55,7 +57,7 @@ func TestHashUnmarshalJSON(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashAssertValid(t *testing.T) {
|
||||
func TestHashValidate(t *testing.T) {
|
||||
type in struct {
|
||||
hash Hash
|
||||
}
|
||||
@@ -86,8 +88,8 @@ func TestHashAssertValid(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
err := test.in.hash.AssertValid()
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
err := test.in.hash.Validate()
|
||||
if !reflect.DeepEqual(report.ReportFromError(test.out.err, report.EntryError), err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
10
vendor/github.com/coreos/ignition/config/types/ignition.go
generated
vendored
10
vendor/github.com/coreos/ignition/config/types/ignition.go
generated
vendored
@@ -19,6 +19,8 @@ import (
|
||||
"errors"
|
||||
|
||||
"github.com/coreos/go-semver/semver"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -56,12 +58,12 @@ func (v IgnitionVersion) MarshalJSON() ([]byte, error) {
|
||||
return semver.Version(v).MarshalJSON()
|
||||
}
|
||||
|
||||
func (v IgnitionVersion) AssertValid() error {
|
||||
func (v IgnitionVersion) Validate() report.Report {
|
||||
if MaxVersion.Major > v.Major {
|
||||
return ErrOldVersion
|
||||
return report.ReportFromError(ErrOldVersion, report.EntryError)
|
||||
}
|
||||
if MaxVersion.LessThan(semver.Version(v)) {
|
||||
return ErrNewVersion
|
||||
return report.ReportFromError(ErrNewVersion, report.EntryError)
|
||||
}
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
46
vendor/github.com/coreos/ignition/config/types/partition.go
generated
vendored
46
vendor/github.com/coreos/ignition/config/types/partition.go
generated
vendored
@@ -15,9 +15,10 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
type Partition struct {
|
||||
@@ -29,59 +30,30 @@ type Partition struct {
|
||||
}
|
||||
|
||||
type PartitionLabel string
|
||||
type partitionLabel PartitionLabel
|
||||
|
||||
func (n *PartitionLabel) UnmarshalJSON(data []byte) error {
|
||||
tn := partitionLabel(*n)
|
||||
if err := json.Unmarshal(data, &tn); err != nil {
|
||||
return err
|
||||
}
|
||||
*n = PartitionLabel(tn)
|
||||
return n.AssertValid()
|
||||
}
|
||||
|
||||
func (n PartitionLabel) AssertValid() error {
|
||||
func (n PartitionLabel) Validate() report.Report {
|
||||
// http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries:
|
||||
// 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units)
|
||||
|
||||
// XXX(vc): note GPT calls it a name, we're using label for consistency
|
||||
// with udev naming /dev/disk/by-partlabel/*.
|
||||
if len(string(n)) > 36 {
|
||||
return fmt.Errorf("partition labels may not exceed 36 characters")
|
||||
return report.ReportFromError(fmt.Errorf("partition labels may not exceed 36 characters"), report.EntryError)
|
||||
}
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
type PartitionDimension uint64
|
||||
|
||||
func (n *PartitionDimension) UnmarshalJSON(data []byte) error {
|
||||
var pd uint64
|
||||
if err := json.Unmarshal(data, &pd); err != nil {
|
||||
return err
|
||||
}
|
||||
*n = PartitionDimension(pd)
|
||||
return nil
|
||||
}
|
||||
|
||||
type PartitionTypeGUID string
|
||||
type partitionTypeGUID PartitionTypeGUID
|
||||
|
||||
func (d *PartitionTypeGUID) UnmarshalJSON(data []byte) error {
|
||||
td := partitionTypeGUID(*d)
|
||||
if err := json.Unmarshal(data, &td); err != nil {
|
||||
return err
|
||||
}
|
||||
*d = PartitionTypeGUID(td)
|
||||
return d.AssertValid()
|
||||
}
|
||||
|
||||
func (d PartitionTypeGUID) AssertValid() error {
|
||||
func (d PartitionTypeGUID) Validate() report.Report {
|
||||
ok, err := regexp.MatchString("^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$", string(d))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error matching type-guid regexp: %v", err)
|
||||
return report.ReportFromError(fmt.Errorf("error matching type-guid regexp: %v", err), report.EntryError)
|
||||
}
|
||||
if !ok {
|
||||
return fmt.Errorf(`partition type-guid must have the form "01234567-89AB-CDEF-EDCB-A98765432101", got: %q`, string(d))
|
||||
return report.ReportFromError(fmt.Errorf(`partition type-guid must have the form "01234567-89AB-CDEF-EDCB-A98765432101", got: %q`, string(d)), report.EntryError)
|
||||
}
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
19
vendor/github.com/coreos/ignition/config/types/path.go
generated
vendored
19
vendor/github.com/coreos/ignition/config/types/path.go
generated
vendored
@@ -15,9 +15,10 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -25,24 +26,14 @@ var (
|
||||
)
|
||||
|
||||
type Path string
|
||||
type path Path
|
||||
|
||||
func (p *Path) UnmarshalJSON(data []byte) error {
|
||||
td := path(*p)
|
||||
if err := json.Unmarshal(data, &td); err != nil {
|
||||
return err
|
||||
}
|
||||
*p = Path(td)
|
||||
return p.AssertValid()
|
||||
}
|
||||
|
||||
func (p Path) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + string(p) + `"`), nil
|
||||
}
|
||||
|
||||
func (p Path) AssertValid() error {
|
||||
func (p Path) Validate() report.Report {
|
||||
if !filepath.IsAbs(string(p)) {
|
||||
return ErrPathRelative
|
||||
return report.ReportFromError(ErrPathRelative, report.EntryError)
|
||||
}
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
44
vendor/github.com/coreos/ignition/config/types/path_test.go
generated
vendored
44
vendor/github.com/coreos/ignition/config/types/path_test.go
generated
vendored
@@ -15,47 +15,13 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
func TestPathUnmarshalJSON(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
}
|
||||
type out struct {
|
||||
device Path
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
in in
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: `"/path"`},
|
||||
out: out{device: Path("/path")},
|
||||
},
|
||||
{
|
||||
in: in{data: `"bad"`},
|
||||
out: out{device: Path("bad"), err: ErrPathRelative},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
var device Path
|
||||
err := json.Unmarshal([]byte(test.in.data), &device)
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.out.device, device) {
|
||||
t.Errorf("#%d: bad device: want %#v, got %#v", i, test.out.device, device)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPathAssertValid(t *testing.T) {
|
||||
func TestPathValidate(t *testing.T) {
|
||||
type in struct {
|
||||
device Path
|
||||
}
|
||||
@@ -90,8 +56,8 @@ func TestPathAssertValid(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
err := test.in.device.AssertValid()
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
err := test.in.device.Validate()
|
||||
if !reflect.DeepEqual(report.ReportFromError(test.out.err, report.EntryError), err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
21
vendor/github.com/coreos/ignition/config/types/raid.go
generated
vendored
21
vendor/github.com/coreos/ignition/config/types/raid.go
generated
vendored
@@ -15,8 +15,9 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
type Raid struct {
|
||||
@@ -25,22 +26,12 @@ type Raid struct {
|
||||
Devices []Path `json:"devices,omitempty"`
|
||||
Spares int `json:"spares,omitempty"`
|
||||
}
|
||||
type raid Raid
|
||||
|
||||
func (n *Raid) UnmarshalJSON(data []byte) error {
|
||||
tn := raid(*n)
|
||||
if err := json.Unmarshal(data, &tn); err != nil {
|
||||
return err
|
||||
}
|
||||
*n = Raid(tn)
|
||||
return n.AssertValid()
|
||||
}
|
||||
|
||||
func (n Raid) AssertValid() error {
|
||||
func (n Raid) Validate() report.Report {
|
||||
switch n.Level {
|
||||
case "linear", "raid0", "0", "stripe":
|
||||
if n.Spares != 0 {
|
||||
return fmt.Errorf("spares unsupported for %q arrays", n.Level)
|
||||
return report.ReportFromError(fmt.Errorf("spares unsupported for %q arrays", n.Level), report.EntryError)
|
||||
}
|
||||
case "raid1", "1", "mirror":
|
||||
case "raid4", "4":
|
||||
@@ -48,7 +39,7 @@ func (n Raid) AssertValid() error {
|
||||
case "raid6", "6":
|
||||
case "raid10", "10":
|
||||
default:
|
||||
return fmt.Errorf("unrecognized raid level: %q", n.Level)
|
||||
return report.ReportFromError(fmt.Errorf("unrecognized raid level: %q", n.Level), report.EntryError)
|
||||
}
|
||||
return nil
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
51
vendor/github.com/coreos/ignition/config/types/unit.go
generated
vendored
51
vendor/github.com/coreos/ignition/config/types/unit.go
generated
vendored
@@ -15,9 +15,10 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
type SystemdUnit struct {
|
||||
@@ -34,44 +35,24 @@ type SystemdUnitDropIn struct {
|
||||
}
|
||||
|
||||
type SystemdUnitName string
|
||||
type systemdUnitName SystemdUnitName
|
||||
|
||||
func (n *SystemdUnitName) UnmarshalJSON(data []byte) error {
|
||||
tn := systemdUnitName(*n)
|
||||
if err := json.Unmarshal(data, &tn); err != nil {
|
||||
return err
|
||||
}
|
||||
*n = SystemdUnitName(tn)
|
||||
return n.AssertValid()
|
||||
}
|
||||
|
||||
func (n SystemdUnitName) AssertValid() error {
|
||||
func (n SystemdUnitName) Validate() report.Report {
|
||||
switch filepath.Ext(string(n)) {
|
||||
case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope":
|
||||
return nil
|
||||
return report.Report{}
|
||||
default:
|
||||
return errors.New("invalid systemd unit extension")
|
||||
return report.ReportFromError(errors.New("invalid systemd unit extension"), report.EntryError)
|
||||
}
|
||||
}
|
||||
|
||||
type SystemdUnitDropInName string
|
||||
type systemdUnitDropInName SystemdUnitDropInName
|
||||
|
||||
func (n *SystemdUnitDropInName) UnmarshalJSON(data []byte) error {
|
||||
tn := systemdUnitDropInName(*n)
|
||||
if err := json.Unmarshal(data, &tn); err != nil {
|
||||
return err
|
||||
}
|
||||
*n = SystemdUnitDropInName(tn)
|
||||
return n.AssertValid()
|
||||
}
|
||||
|
||||
func (n SystemdUnitDropInName) AssertValid() error {
|
||||
func (n SystemdUnitDropInName) Validate() report.Report {
|
||||
switch filepath.Ext(string(n)) {
|
||||
case ".conf":
|
||||
return nil
|
||||
return report.Report{}
|
||||
default:
|
||||
return errors.New("invalid systemd unit drop-in extension")
|
||||
return report.ReportFromError(errors.New("invalid systemd unit drop-in extension"), report.EntryError)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,22 +62,12 @@ type NetworkdUnit struct {
|
||||
}
|
||||
|
||||
type NetworkdUnitName string
|
||||
type networkdUnitName NetworkdUnitName
|
||||
|
||||
func (n *NetworkdUnitName) UnmarshalJSON(data []byte) error {
|
||||
tn := networkdUnitName(*n)
|
||||
if err := json.Unmarshal(data, &tn); err != nil {
|
||||
return err
|
||||
}
|
||||
*n = NetworkdUnitName(tn)
|
||||
return n.AssertValid()
|
||||
}
|
||||
|
||||
func (n NetworkdUnitName) AssertValid() error {
|
||||
func (n NetworkdUnitName) Validate() report.Report {
|
||||
switch filepath.Ext(string(n)) {
|
||||
case ".link", ".netdev", ".network":
|
||||
return nil
|
||||
return report.Report{}
|
||||
default:
|
||||
return errors.New("invalid networkd unit extension")
|
||||
return report.ReportFromError(errors.New("invalid networkd unit extension"), report.EntryError)
|
||||
}
|
||||
}
|
||||
|
||||
65
vendor/github.com/coreos/ignition/config/types/unit_test.go
generated
vendored
65
vendor/github.com/coreos/ignition/config/types/unit_test.go
generated
vendored
@@ -15,19 +15,19 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
func TestSystemdUnitNameUnmarshalJSON(t *testing.T) {
|
||||
func TestSystemdUnitNameValidate(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
unit SystemdUnitName
|
||||
}
|
||||
type out struct {
|
||||
unit SystemdUnitName
|
||||
err error
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
@@ -35,42 +35,33 @@ func TestSystemdUnitNameUnmarshalJSON(t *testing.T) {
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: `"test.service"`},
|
||||
out: out{unit: SystemdUnitName("test.service")},
|
||||
in: in{unit: SystemdUnitName("test.service")},
|
||||
out: out{err: nil},
|
||||
},
|
||||
{
|
||||
in: in{data: `"test.socket"`},
|
||||
out: out{unit: SystemdUnitName("test.socket")},
|
||||
in: in{unit: SystemdUnitName("test.socket")},
|
||||
out: out{err: nil},
|
||||
},
|
||||
{
|
||||
in: in{data: `"test.blah"`},
|
||||
in: in{unit: SystemdUnitName("test.blah")},
|
||||
out: out{err: errors.New("invalid systemd unit extension")},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
var unit SystemdUnitName
|
||||
err := json.Unmarshal([]byte(test.in.data), &unit)
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
err := test.in.unit.Validate()
|
||||
if !reflect.DeepEqual(report.ReportFromError(test.out.err, report.EntryError), err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.out.unit, unit) {
|
||||
t.Errorf("#%d: bad unit: want %#v, got %#v", i, test.out.unit, unit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetworkdUnitNameUnmarshalJSON(t *testing.T) {
|
||||
func TestNetworkdUnitNameValidate(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
unit NetworkdUnitName
|
||||
}
|
||||
type out struct {
|
||||
unit NetworkdUnitName
|
||||
err error
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
@@ -78,35 +69,27 @@ func TestNetworkdUnitNameUnmarshalJSON(t *testing.T) {
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: `"test.network"`},
|
||||
out: out{unit: NetworkdUnitName("test.network")},
|
||||
in: in{unit: NetworkdUnitName("test.network")},
|
||||
out: out{err: nil},
|
||||
},
|
||||
{
|
||||
in: in{data: `"test.link"`},
|
||||
out: out{unit: NetworkdUnitName("test.link")},
|
||||
in: in{unit: NetworkdUnitName("test.link")},
|
||||
out: out{err: nil},
|
||||
},
|
||||
{
|
||||
in: in{data: `"test.netdev"`},
|
||||
out: out{unit: NetworkdUnitName("test.netdev")},
|
||||
in: in{unit: NetworkdUnitName("test.netdev")},
|
||||
out: out{err: nil},
|
||||
},
|
||||
{
|
||||
in: in{data: `"test.blah"`},
|
||||
in: in{unit: NetworkdUnitName("test.blah")},
|
||||
out: out{err: errors.New("invalid networkd unit extension")},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
var unit NetworkdUnitName
|
||||
err := json.Unmarshal([]byte(test.in.data), &unit)
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
err := test.in.unit.Validate()
|
||||
if !reflect.DeepEqual(report.ReportFromError(test.out.err, report.EntryError), err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.out.unit, unit) {
|
||||
t.Errorf("#%d: bad unit: want %#v, got %#v", i, test.out.unit, unit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
26
vendor/github.com/coreos/ignition/config/types/url.go
generated
vendored
26
vendor/github.com/coreos/ignition/config/types/url.go
generated
vendored
@@ -16,7 +16,14 @@ package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrInvalidScheme = errors.New("invalid url scheme")
|
||||
)
|
||||
|
||||
type Url url.URL
|
||||
@@ -28,8 +35,12 @@ func (u *Url) UnmarshalJSON(data []byte) error {
|
||||
}
|
||||
|
||||
pu, err := url.Parse(tu)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*u = Url(*pu)
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u Url) MarshalJSON() ([]byte, error) {
|
||||
@@ -40,3 +51,16 @@ func (u Url) String() string {
|
||||
tu := url.URL(u)
|
||||
return (&tu).String()
|
||||
}
|
||||
|
||||
func (u Url) Validate() report.Report {
|
||||
// Empty url is valid, indicates an empty file
|
||||
if u.String() == "" {
|
||||
return report.Report{}
|
||||
}
|
||||
switch url.URL(u).Scheme {
|
||||
case "http", "https", "oem", "data":
|
||||
return report.Report{}
|
||||
}
|
||||
|
||||
return report.ReportFromError(ErrInvalidScheme, report.EntryError)
|
||||
}
|
||||
|
||||
@@ -12,21 +12,22 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
package types
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/go-yaml/yaml"
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
func TestAssertKeysValid(t *testing.T) {
|
||||
func TestURLValidate(t *testing.T) {
|
||||
type in struct {
|
||||
data string
|
||||
u string
|
||||
}
|
||||
type out struct {
|
||||
err ErrKeysUnrecognized
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
@@ -34,31 +35,39 @@ func TestAssertKeysValid(t *testing.T) {
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{data: "ignition:\n config:"},
|
||||
in: in{u: ""},
|
||||
out: out{},
|
||||
},
|
||||
{
|
||||
in: in{data: "passwd:\n groups:\n - name: example"},
|
||||
in: in{u: "http://example.com"},
|
||||
out: out{},
|
||||
},
|
||||
{
|
||||
in: in{data: "password:\n groups:"},
|
||||
out: out{err: ErrKeysUnrecognized{"password"}},
|
||||
in: in{u: "https://example.com"},
|
||||
out: out{},
|
||||
},
|
||||
{
|
||||
in: in{data: "passwd:\n groups:\n - naem: example"},
|
||||
out: out{err: ErrKeysUnrecognized{"naem"}},
|
||||
in: in{u: "oem:///foobar"},
|
||||
out: out{},
|
||||
},
|
||||
{
|
||||
in: in{u: "data:,example%20file%0A"},
|
||||
out: out{},
|
||||
},
|
||||
{
|
||||
in: in{u: "bad://"},
|
||||
out: out{err: ErrInvalidScheme},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
var cfg interface{}
|
||||
if err := yaml.Unmarshal([]byte(test.in.data), &cfg); err != nil {
|
||||
t.Errorf("#%d: unmarshal failed: %v", i, err)
|
||||
continue
|
||||
u, err := url.Parse(test.in.u)
|
||||
if err != nil {
|
||||
t.Errorf("URL failed to parse. This is an error with the test")
|
||||
}
|
||||
if err := assertKeysValid(cfg, reflect.TypeOf(Config{})); !reflect.DeepEqual(err, test.out.err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
r := Url(*u).Validate()
|
||||
if !reflect.DeepEqual(report.ReportFromError(test.out.err, report.EntryError), r) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
2
vendor/github.com/coreos/ignition/config/v1/config.go
generated
vendored
2
vendor/github.com/coreos/ignition/config/v1/config.go
generated
vendored
@@ -22,7 +22,7 @@ import (
|
||||
|
||||
"github.com/coreos/ignition/config/v1/types"
|
||||
|
||||
"github.com/camlistore/camlistore/pkg/errorutil"
|
||||
"go4.org/errorutil"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
3
vendor/github.com/coreos/ignition/config/v1/vendor.manifest
generated
vendored
3
vendor/github.com/coreos/ignition/config/v1/vendor.manifest
generated
vendored
@@ -1,3 +0,0 @@
|
||||
# If you manipulate the contents of third_party/, amend this accordingly.
|
||||
# pkg version
|
||||
github.com/camlistore/camlistore/pkg/errorutil 9106ce829629773474c689b34aacd7d3aaa99426
|
||||
73
vendor/github.com/coreos/ignition/config/validate/astjson/node.go
generated
vendored
Normal file
73
vendor/github.com/coreos/ignition/config/validate/astjson/node.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
json "github.com/ajeddeloh/go-json"
|
||||
"github.com/coreos/ignition/config/validate"
|
||||
"go4.org/errorutil"
|
||||
)
|
||||
|
||||
type JsonNode json.Node
|
||||
|
||||
func FromJsonRoot(n json.Node) JsonNode {
|
||||
return JsonNode(n)
|
||||
}
|
||||
|
||||
func (n JsonNode) ValueLineCol(source io.ReadSeeker) (int, int, string) {
|
||||
return posFromOffset(n.End, source)
|
||||
}
|
||||
|
||||
func (n JsonNode) KeyLineCol(source io.ReadSeeker) (int, int, string) {
|
||||
return posFromOffset(n.KeyEnd, source)
|
||||
}
|
||||
|
||||
func (n JsonNode) LiteralValue() interface{} {
|
||||
return n.Value
|
||||
}
|
||||
|
||||
func (n JsonNode) SliceChild(index int) (validate.AstNode, bool) {
|
||||
if slice, ok := n.Value.([]json.Node); ok {
|
||||
return JsonNode(slice[index]), true
|
||||
}
|
||||
return JsonNode{}, false
|
||||
}
|
||||
|
||||
func (n JsonNode) KeyValueMap() (map[string]validate.AstNode, bool) {
|
||||
if kvmap, ok := n.Value.(map[string]json.Node); ok {
|
||||
newKvmap := map[string]validate.AstNode{}
|
||||
for k, v := range kvmap {
|
||||
newKvmap[k] = JsonNode(v)
|
||||
}
|
||||
return newKvmap, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (n JsonNode) Tag() string {
|
||||
return "json"
|
||||
}
|
||||
|
||||
// wrapper for errorutil that handles missing sources sanely and resets the reader afterwards
|
||||
func posFromOffset(offset int, source io.ReadSeeker) (int, int, string) {
|
||||
if source == nil {
|
||||
return 0, 0, ""
|
||||
}
|
||||
line, col, highlight := errorutil.HighlightBytePosition(source, int64(offset))
|
||||
source.Seek(0, 0) // Reset the reader to the start so the next call isn't relative to this position
|
||||
return line, col, highlight
|
||||
}
|
||||
vendor/github.com/coreos/ignition/config/validate/report/report.go (generated, vendored, new file): 158 lines added
@@ -0,0 +1,158 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package report
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type Report struct {
|
||||
Entries []Entry
|
||||
}
|
||||
|
||||
func (into *Report) Merge(from Report) {
|
||||
into.Entries = append(into.Entries, from.Entries...)
|
||||
}
|
||||
|
||||
func ReportFromError(err error, severity entryKind) Report {
|
||||
if err == nil {
|
||||
return Report{}
|
||||
}
|
||||
return Report{
|
||||
Entries: []Entry{
|
||||
{
|
||||
Kind: severity,
|
||||
Message: err.Error(),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Sort sorts the entries by line number, then column number
|
||||
func (r *Report) Sort() {
|
||||
sort.Sort(entries(r.Entries))
|
||||
}
|
||||
|
||||
type entries []Entry
|
||||
|
||||
func (e entries) Len() int {
|
||||
return len(e)
|
||||
}
|
||||
|
||||
func (e entries) Swap(i, j int) {
|
||||
e[i], e[j] = e[j], e[i]
|
||||
}
|
||||
|
||||
func (e entries) Less(i, j int) bool {
|
||||
if e[i].Line != e[j].Line {
|
||||
return e[i].Line < e[j].Line
|
||||
}
|
||||
return e[i].Column < e[j].Column
|
||||
}
|
||||
|
||||
const (
|
||||
EntryError entryKind = iota
|
||||
EntryWarning
|
||||
EntryInfo
|
||||
EntryDeprecated
|
||||
)
|
||||
|
||||
// AddPosition updates all the entries with Line equal to 0, setting their Line/Column fields to line/column. This is useful
// when a type has a custom unmarshaller and thus an exact offset of the error within the type cannot be determined. In that case
// the offset of the entire chunk of JSON that was unmarshalled into the type can be used instead, which is still pretty good.
|
||||
func (r *Report) AddPosition(line, col int, highlight string) {
|
||||
for i, e := range r.Entries {
|
||||
if e.Line == 0 {
|
||||
r.Entries[i].Line = line
|
||||
r.Entries[i].Column = col
|
||||
r.Entries[i].Highlight = highlight
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Report) Add(e Entry) {
|
||||
r.Entries = append(r.Entries, e)
|
||||
}
|
||||
|
||||
func (r Report) String() string {
|
||||
var errs bytes.Buffer
|
||||
for i, entry := range r.Entries {
|
||||
if i != 0 {
|
||||
// Only add line breaks on multiline reports
|
||||
errs.WriteString("\n")
|
||||
}
|
||||
errs.WriteString(entry.String())
|
||||
}
|
||||
return errs.String()
|
||||
}
|
||||
|
||||
// IsFatal returns if there were any errors that make the config invalid
|
||||
func (r Report) IsFatal() bool {
|
||||
for _, entry := range r.Entries {
|
||||
if entry.Kind == EntryError {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsDeprecated returns if the report has deprecations
|
||||
func (r Report) IsDeprecated() bool {
|
||||
for _, entry := range r.Entries {
|
||||
if entry.Kind == EntryDeprecated {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type Entry struct {
|
||||
Kind entryKind `json:"kind"`
|
||||
Message string `json:"message"`
|
||||
Line int `json:"line,omitempty"`
|
||||
Column int `json:"column,omitempty"`
|
||||
Highlight string `json:"-"`
|
||||
}
|
||||
|
||||
func (e Entry) String() string {
|
||||
if e.Line != 0 {
|
||||
return fmt.Sprintf("%s at line %d, column %d\n%s%v", e.Kind.String(), e.Line, e.Column, e.Highlight, e.Message)
|
||||
}
|
||||
return fmt.Sprintf("%s: %v", e.Kind.String(), e.Message)
|
||||
}
|
||||
|
||||
type entryKind int
|
||||
|
||||
func (e entryKind) String() string {
|
||||
switch e {
|
||||
case EntryError:
|
||||
return "error"
|
||||
case EntryWarning:
|
||||
return "warning"
|
||||
case EntryInfo:
|
||||
return "info"
|
||||
case EntryDeprecated:
|
||||
return "deprecated"
|
||||
default:
|
||||
return "unknown error"
|
||||
}
|
||||
}
|
||||
|
||||
func (e entryKind) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(e.String())
|
||||
}
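
For orientation, here is a minimal sketch (not part of the vendored change) of how the report package introduced above might be used from calling code; it relies only on the exported names shown in the diff (ReportFromError, Entry, Add, Merge, Sort, IsFatal, IsDeprecated).

```go
package main

import (
	"errors"
	"fmt"

	"github.com/coreos/ignition/config/validate/report"
)

func main() {
	// Build a report from a single error; a nil error yields an empty report.
	r := report.ReportFromError(errors.New("invalid filesystem format"), report.EntryError)

	// Add a warning that carries position information.
	r.Add(report.Entry{
		Kind:    report.EntryWarning,
		Message: "Config has unrecognized key: password",
		Line:    3,
		Column:  1,
	})

	// Merge in another report, e.g. from a deprecation.
	r.Merge(report.ReportFromError(errors.New("config format is deprecated"), report.EntryDeprecated))

	// Sort entries by line/column, print them, and decide whether the config is usable.
	r.Sort()
	fmt.Println(r.String())
	fmt.Println("fatal:", r.IsFatal(), "deprecated:", r.IsDeprecated())
}
```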
|
||||
vendor/github.com/coreos/ignition/config/validate/validate.go (generated, vendored, new file): 227 lines added
@@ -0,0 +1,227 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/ignition/config/types"
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
type validator interface {
|
||||
Validate() report.Report
|
||||
}
|
||||
|
||||
// AstNode abstracts the differences between yaml and json nodes, providing a
|
||||
// common interface
|
||||
type AstNode interface {
|
||||
// ValueLineCol returns the line, column, and highlight string of the value of
|
||||
// this node in the source.
|
||||
ValueLineCol(source io.ReadSeeker) (int, int, string)
|
||||
|
||||
// KeyLineCol returns the line, column, and highlight string of the key for the
|
||||
// value of this node in the source.
|
||||
KeyLineCol(source io.ReadSeeker) (int, int, string)
|
||||
|
||||
// LiteralValue returns the value of this node.
|
||||
LiteralValue() interface{}
|
||||
|
||||
// SliceChild returns the child node at the index specified. If this node is not
|
||||
// a slice node, an empty AstNode and false is returned.
|
||||
SliceChild(index int) (AstNode, bool)
|
||||
|
||||
// KeyValueMap returns a map of keys and values. If this node is not a mapping
|
||||
// node, nil and false are returned.
|
||||
KeyValueMap() (map[string]AstNode, bool)
|
||||
|
||||
// Tag returns the struct tag used in the config structure used to unmarshal.
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// Validate walks down a struct tree calling Validate on every node that implements it, building
// a report of all the errors, warnings, info, and deprecations it encounters.
|
||||
func Validate(vObj reflect.Value, ast AstNode, source io.ReadSeeker) (r report.Report) {
|
||||
if !vObj.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
line, col, highlight := 0, 0, ""
|
||||
if ast != nil {
|
||||
line, col, highlight = ast.ValueLineCol(source)
|
||||
}
|
||||
|
||||
// See if we A) can call Validate on vObj and B) should call Validate. Validate should NOT be called when vObj
// is nil, as that would panic, nor when vObj is a pointer to a value whose Validate is implemented with a
// value receiver. This prevents Validate from being called twice: once on the pointer (due to Go's automatic
// dereferencing) and once when the pointer is dereferenced below. The only time Validate should be called on
// a pointer is when the method is implemented with a pointer receiver.
|
||||
if obj, ok := vObj.Interface().(validator); ok &&
|
||||
((vObj.Kind() != reflect.Ptr) ||
|
||||
(!vObj.IsNil() && !vObj.Elem().Type().Implements(reflect.TypeOf((*validator)(nil)).Elem()))) {
|
||||
sub_r := obj.Validate()
|
||||
if vObj.Type() != reflect.TypeOf(types.Config{}) {
|
||||
// Config checks are done on the config as a whole and shouldn't get line numbers
|
||||
sub_r.AddPosition(line, col, highlight)
|
||||
}
|
||||
r.Merge(sub_r)
|
||||
|
||||
// Don't recurse on invalid inner nodes; it mostly leads to bogus messages
|
||||
if sub_r.IsFatal() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch vObj.Kind() {
|
||||
case reflect.Ptr:
|
||||
sub_report := Validate(vObj.Elem(), ast, source)
|
||||
sub_report.AddPosition(line, col, "")
|
||||
r.Merge(sub_report)
|
||||
case reflect.Struct:
|
||||
sub_report := validateStruct(vObj, ast, source)
|
||||
sub_report.AddPosition(line, col, "")
|
||||
r.Merge(sub_report)
|
||||
case reflect.Slice:
|
||||
for i := 0; i < vObj.Len(); i++ {
|
||||
sub_node := ast
|
||||
if ast != nil {
|
||||
if n, ok := ast.SliceChild(i); ok {
|
||||
sub_node = n
|
||||
}
|
||||
}
|
||||
sub_report := Validate(vObj.Index(i), sub_node, source)
|
||||
sub_report.AddPosition(line, col, "")
|
||||
r.Merge(sub_report)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ValidateWithoutSource(cfg reflect.Value) (report report.Report) {
|
||||
return Validate(cfg, nil, nil)
|
||||
}
|
||||
|
||||
type field struct {
|
||||
Type reflect.StructField
|
||||
Value reflect.Value
|
||||
}
|
||||
|
||||
// getFields returns a slice of all the fields in the struct, including the fields of
// embedded structs.
|
||||
func getFields(vObj reflect.Value) []field {
|
||||
ret := []field{}
|
||||
for i := 0; i < vObj.Type().NumField(); i++ {
|
||||
if vObj.Type().Field(i).Anonymous {
|
||||
ret = append(ret, getFields(vObj.Field(i))...)
|
||||
} else {
|
||||
ret = append(ret, field{Type: vObj.Type().Field(i), Value: vObj.Field(i)})
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func validateStruct(vObj reflect.Value, ast AstNode, source io.ReadSeeker) report.Report {
|
||||
r := report.Report{}
|
||||
|
||||
// isFromObject will be true if this struct was unmarshalled from a JSON object.
|
||||
keys, isFromObject := map[string]AstNode{}, false
|
||||
if ast != nil {
|
||||
keys, isFromObject = ast.KeyValueMap()
|
||||
}
|
||||
|
||||
// Maintain a set of key's that have been used.
|
||||
usedKeys := map[string]struct{}{}
|
||||
|
||||
// Maintain a list of all the tags in the struct for fuzzy matching later.
|
||||
tags := []string{}
|
||||
|
||||
for _, f := range getFields(vObj) {
|
||||
// Default to a nil AstNode if the field's corresponding node cannot be found.
|
||||
var sub_node AstNode
|
||||
// Default to passing a nil source if the field's corresponding node cannot be found.
|
||||
// This ensures the line numbers reported from all sub-structs are 0 and will be changed by AddPosition
|
||||
var src io.ReadSeeker
|
||||
|
||||
// Try to determine the json.Node that corresponds with the struct field
|
||||
if isFromObject {
|
||||
tag := strings.SplitN(f.Type.Tag.Get(ast.Tag()), ",", 2)[0]
|
||||
// Save the tag so we have a list of all the tags in the struct
|
||||
tags = append(tags, tag)
|
||||
// mark that this key was used
|
||||
usedKeys[tag] = struct{}{}
|
||||
|
||||
if sub, ok := keys[tag]; ok {
|
||||
// Found it
|
||||
sub_node = sub
|
||||
src = source
|
||||
}
|
||||
}
|
||||
sub_report := Validate(f.Value, sub_node, src)
|
||||
// Default to the deepest node if the node's type isn't an object,
// such as when a JSON string actually unmarshals to a struct (as with version)
|
||||
line, col := 0, 0
|
||||
if ast != nil {
|
||||
line, col, _ = ast.ValueLineCol(src)
|
||||
}
|
||||
sub_report.AddPosition(line, col, "")
|
||||
r.Merge(sub_report)
|
||||
}
|
||||
if !isFromObject {
|
||||
// If this struct was not unmarshalled from a JSON object, there cannot be unused keys.
|
||||
return r
|
||||
}
|
||||
|
||||
for k, v := range keys {
|
||||
if _, hasKey := usedKeys[k]; hasKey {
|
||||
continue
|
||||
}
|
||||
line, col, highlight := v.KeyLineCol(source)
|
||||
typo := similar(k, tags)
|
||||
|
||||
r.Add(report.Entry{
|
||||
Kind: report.EntryWarning,
|
||||
Message: fmt.Sprintf("Config has unrecognized key: %s", k),
|
||||
Line: line,
|
||||
Column: col,
|
||||
Highlight: highlight,
|
||||
})
|
||||
|
||||
if typo != "" {
|
||||
r.Add(report.Entry{
|
||||
Kind: report.EntryInfo,
|
||||
Message: fmt.Sprintf("Did you mean %s instead of %s", typo, k),
|
||||
Line: line,
|
||||
Column: col,
|
||||
Highlight: highlight,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// similar returns a string in candidates that is similar to str. Currently it just does a case-
// insensitive comparison, but it should be updated to use Levenshtein distance to catch typos.
|
||||
func similar(str string, candidates []string) string {
|
||||
for _, candidate := range candidates {
|
||||
if strings.EqualFold(str, candidate) {
|
||||
return candidate
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
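
To illustrate how Validate discovers the validator interface by reflection, the following sketch (not part of the vendored code) defines a hypothetical Listener config whose Port field implements Validate; ValidateWithoutSource then walks the struct and collects the resulting report. Both type names are assumptions made purely for illustration.

```go
package main

import (
	"errors"
	"fmt"
	"reflect"

	"github.com/coreos/ignition/config/validate"
	"github.com/coreos/ignition/config/validate/report"
)

// Port is a hypothetical field type carrying its own validation rule.
type Port int

func (p Port) Validate() report.Report {
	if p < 1 || p > 65535 {
		return report.ReportFromError(errors.New("port out of range"), report.EntryError)
	}
	return report.Report{}
}

// Listener is a hypothetical config struct; Validate is found on its fields by reflection.
type Listener struct {
	Addr string `json:"addr"`
	Port Port   `json:"port"`
}

func main() {
	r := validate.ValidateWithoutSource(reflect.ValueOf(Listener{Addr: "0.0.0.0", Port: 70000}))
	fmt.Println(r.String())  // error: port out of range
	fmt.Println(r.IsFatal()) // true
}
```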
|
||||
@@ -12,15 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
package validate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
// Import into the same namespace to keep config definitions clean
|
||||
. "github.com/coreos/ignition/config/types"
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
)
|
||||
|
||||
func TestAssertValid(t *testing.T) {
|
||||
func TestValidate(t *testing.T) {
|
||||
type in struct {
|
||||
cfg Config
|
||||
}
|
||||
@@ -96,9 +100,10 @@ func TestAssertValid(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
err := test.in.cfg.AssertValid()
|
||||
if !reflect.DeepEqual(test.out.err, err) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
r := ValidateWithoutSource(reflect.ValueOf(test.in.cfg))
|
||||
expectedReport := report.ReportFromError(test.out.err, report.EntryError)
|
||||
if !reflect.DeepEqual(expectedReport, r) {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, expectedReport, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
vendor/github.com/coreos/ignition/config/vendor.manifest (generated, vendored): 9 lines changed
@@ -1,5 +1,6 @@
|
||||
# If you manipulate the contents of vendor/, amend this accordingly.
|
||||
# pkg version
|
||||
github.com/coreos/go-semver 294930c1e79c64e7dbe360054274fdad492c8cf5
|
||||
github.com/vincent-petithory/dataurl 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
|
||||
go4.org/errorutil 03efcb870d84809319ea509714dd6d19a1498483
|
||||
# pkg version
|
||||
github.com/ajeddeloh/go-json 73d058cf8437a1989030afe571eeab9f90eebbbd
|
||||
github.com/coreos/go-semver 294930c1e79c64e7dbe360054274fdad492c8cf5
|
||||
github.com/vincent-petithory/dataurl 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
|
||||
go4.org/errorutil 03efcb870d84809319ea509714dd6d19a1498483
|
||||
|
||||
vendor/github.com/coreos/ignition/doc/examples.md (generated, vendored): 20 lines changed
@@ -189,7 +189,7 @@ In many scenarios, it may be useful to have an external data volume. This config
|
||||
|
||||
```
|
||||
[Mount]
|
||||
What=/dev/data
|
||||
What=/dev/md/data
|
||||
Where=/var/lib/data
|
||||
Type=ext4
|
||||
|
||||
@@ -217,4 +217,22 @@ In some cloud environments, there is a limit on the size of the config which may
|
||||
|
||||
The SHA512 sum of the config can be determined using `sha512sum`.
|
||||
|
||||
## Setting the hostname
|
||||
|
||||
Setting the hostname of a system is as simple as writing `/etc/hostname`:
|
||||
|
||||
```json
|
||||
{
|
||||
"ignition": { "version": "2.0.0" },
|
||||
"storage": {
|
||||
"files": [{
|
||||
"filesystem": "root",
|
||||
"path": "/etc/hostname",
|
||||
"mode": 420,
|
||||
"contents": { "source": "data:,core1" }
|
||||
}]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
[rfc2397]: http://tools.ietf.org/html/rfc2397
|
||||
|
||||
vendor/github.com/coreos/ignition/doc/getting-started.md (generated, vendored): 20 lines changed
@@ -2,7 +2,7 @@
|
||||
|
||||
*Ignition* is a low-level system configuration utility. The Ignition executable is part of the temporary initial root filesystem, the *initramfs*. When Ignition runs, it finds configuration data in a named location for a given environment, such as a file or URL, and applies it to the machine before `switch_root` is called to pivot to the machine's root filesystem.
|
||||
|
||||
Ignition uses a JSON configuration file to represent the set of changes to be made. The format of this config is detailed [in the specification][configspec]. One of the most important parts of this config is the version number. This **must** match the version number accepted by Ignition. If the config version isn't accepted by Ignition, Ignition will fail to run and prevent the machine from booting. This can be seen by inspecting the console output of the failed instance. For more information, check out the [troubleshooting section][troubleshooting].
|
||||
Ignition uses a JSON configuration file to represent the set of changes to be made. The format of this config is detailed [in the specification][configspec] and the [MIME type][mime] is registered with IANA. One of the most important parts of this config is the version number. This **must** match the version number accepted by Ignition. If the config version isn't accepted by Ignition, Ignition will fail to run and the machine will not boot. This can be seen by inspecting the console output of the failed machine. For more information, check out the [troubleshooting section][troubleshooting].
|
||||
|
||||
## Providing a Config
|
||||
|
||||
@@ -23,17 +23,31 @@ The configuration must be passed to Ignition through the designated data source.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Gathering Logs
|
||||
|
||||
The single most useful piece of information needed when troubleshooting is the log from Ignition. Ignition runs in multiple stages so it's easiest to filter by the syslog identifier: `ignition`. When using systemd, this can be accomplished with the following command:
|
||||
|
||||
```
|
||||
journalctl --identifier=ignition
|
||||
journalctl --identifier=ignition --all
|
||||
```
|
||||
|
||||
In the event that this doesn't yield any results, running as root may help. There are circumstances where the journal isn't owned by the systemd-journal group or the current user is not a part of that group.
|
||||
|
||||
In the vast majority of cases, it will be immediately obvious why Ignition failed. If it's not, inspect the config that Ignition wrote into the log. This shows how Ignition interpreted the supplied configuration. The user-provided config may have a misspelled section or maybe an incorrect hierarchy.
|
||||
### Validating the Configuration
|
||||
|
||||
One common cause of Ignition failures is a malformed configuration (e.g. a misspelled section or an incorrect hierarchy). Ignition logs errors, warnings, and other notes about the configuration it parsed, so these messages can be used to debug issues with the provided configuration. As a convenience, CoreOS hosts an [online validator][validator] which can be used to quickly verify configurations.
|
||||
|
||||
### Enabling systemd Services
|
||||
|
||||
When Ignition enables systemd services, it doesn't directly create the symlinks necessary for systemd; it leverages [systemd presets][preset]. Presets are only evaluated on [first-boot][conditions], which can result in confusion if Ignition is forced to run more than once. Any systemd services which have been enabled in the configuration after the first boot won't actually be enabled after the next invocation of Ignition. `systemctl preset-all` will need to be manually invoked to create the necessary symlinks, enabling the services.
|
||||
|
||||
Ignition is not typically run more than once during a machine's lifetime in a given role, so this situation requiring manual systemd intervention does not commonly arise.
|
||||
|
||||
[conditions]: https://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConditionArchitecture=
|
||||
[configspec]: configuration.md
|
||||
[examples]: examples.md
|
||||
[mime]: http://www.iana.org/assignments/media-types/application/vnd.coreos.ignition+json
|
||||
[platforms]: supported-platforms.md
|
||||
[preset]: https://www.freedesktop.org/software/systemd/man/systemd.preset.html
|
||||
[troubleshooting]: #troubleshooting
|
||||
[validator]: https://coreos.com/validate
|
||||
|
||||
vendor/github.com/coreos/ignition/doc/supported-platforms.md (generated, vendored): 6 lines changed
@@ -8,6 +8,9 @@ Ignition is currently only supported for the following platforms:
|
||||
* [Microsoft Azure] - Ignition will read its configuration from the custom data provided to the instance. SSH keys are handled by the Azure Linux Agent.
|
||||
* [VMware] - Use the VMware Guestinfo variables `coreos.config.data` and `coreos.config.data.encoding` to provide the config and its encoding to the virtual machine. Valid encodings are "", "base64", and "gzip+base64".
|
||||
* [Google Compute Engine] - Ignition will read its configuration from the instance metadata entry named "user-data". SSH keys are handled by coreos-metadata.
|
||||
* [Packet] - Ignition will read its configuration from the instance userdata. SSH keys are handled by coreos-metadata.
|
||||
* [QEMU] - Ignition will read its configuration from the 'opt/com.coreos/config' key on the QEMU Firmware Configuration Device.
|
||||
* [DigitalOcean] - Ignition will read its configuration from the droplet userdata. SSH keys and network configuration are handled by coreos-metadata.
|
||||
|
||||
Ignition is under active development so expect this list to expand in the coming months.
|
||||
|
||||
@@ -17,3 +20,6 @@ Ignition is under active development so expect this list to expand in the coming
|
||||
[Microsoft Azure]: https://github.com/coreos/docs/blob/master/os/booting-on-azure.md
|
||||
[VMware]: https://github.com/coreos/docs/blob/master/os/booting-on-vmware.md
|
||||
[Google Compute Engine]: https://github.com/coreos/docs/blob/master/os/booting-on-google-compute-engine.md
|
||||
[Packet]: https://github.com/coreos/docs/blob/master/os/booting-on-packet.md
|
||||
[QEMU]: https://github.com/qemu/qemu/blob/d75aa4372f0414c9960534026a562b0302fcff29/docs/specs/fw_cfg.txt
|
||||
[DigitalOcean]: https://github.com/coreos/docs/blob/master/os/booting-on-digitalocean.md
|
||||
|
||||
vendor/github.com/coreos/ignition/gimme.local (generated, vendored): 481 lines changed
@@ -1,481 +0,0 @@
|
||||
#!/bin/bash
|
||||
# vim:noexpandtab:ts=2:sw=2:
|
||||
#
|
||||
#+ Usage: $(basename $0) [flags] [go-version] [version-prefix]
|
||||
#+ -
|
||||
#+ Version: ${GIMME_VERSION}
|
||||
#+ -
|
||||
#+ Install go! There are multiple types of installations available, with 'auto' being the default.
|
||||
#+ If either 'auto' or 'binary' is specified as GIMME_TYPE, gimme will first check for an existing
|
||||
#+ go installation. This behavior may be disabled by providing '-f/--force/force' as first positional
|
||||
#+ argument.
|
||||
#+ -
|
||||
#+ Option flags:
|
||||
#+ -h --help help - show this help text and exit
|
||||
#+ -V --version version - show the version only and exit
|
||||
#+ -f --force force - remove the existing go installation if present prior to install
|
||||
#+ -l --list list - list installed go versions and exit
|
||||
#+ -
|
||||
#+ Influential env vars:
|
||||
#+ -
|
||||
#+ GIMME_GO_VERSION - version to install (*REQUIRED*, may be given as first positional arg)
|
||||
#+ GIMME_VERSION_PREFIX - prefix for installed versions (default '${GIMME_VERSION_PREFIX}',
|
||||
#+ may be given as second positional arg)
|
||||
#+ GIMME_ARCH - arch to install (default '${GIMME_ARCH}')
|
||||
#+ GIMME_BINARY_OSX - darwin-specific binary suffix (default '${GIMME_BINARY_OSX}')
|
||||
#+ GIMME_ENV_PREFIX - prefix for env files (default '${GIMME_ENV_PREFIX}')
|
||||
#+ GIMME_GO_GIT_REMOTE - git remote for git-based install (default '${GIMME_GO_GIT_REMOTE}')
|
||||
#+ GIMME_OS - os to install (default '${GIMME_OS}')
|
||||
#+ GIMME_TMP - temp directory (default '${GIMME_TMP}')
|
||||
#+ GIMME_TYPE - install type to perform ('auto', 'binary', 'source', or 'git')
|
||||
#+ (default '${GIMME_TYPE}')
|
||||
#+ GIMME_DEBUG - enable tracing if non-empty
|
||||
#+ GIMME_NO_ENV_ALIAS - disable creation of env 'alias' file when os and arch match host
|
||||
#+ GIMME_SILENT_ENV - omit the 'go version' line from env file
|
||||
#+ GIMME_CGO_ENABLED - enable build of cgo support
|
||||
#+ GIMME_CC_FOR_TARGET - cross compiler for cgo support
|
||||
#+ -
|
||||
#
|
||||
# The MIT License (MIT)
|
||||
#
|
||||
# Copyright (c) 2015 Dan Buch, Tianon Gravi
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
#
|
||||
set -e
|
||||
shopt -s nullglob
|
||||
set -o pipefail
|
||||
|
||||
[[ ${GIMME_DEBUG} ]] && set -x
|
||||
|
||||
GIMME_VERSION=v0.2.3
|
||||
|
||||
# _do_curl "url" "file"
|
||||
_do_curl() {
|
||||
mkdir -p "$(dirname "${2}")"
|
||||
|
||||
if command -v curl > /dev/null ; then
|
||||
curl -sSLf "${1}" -o "${2}" 2>/dev/null
|
||||
return
|
||||
fi
|
||||
|
||||
if command -v wget > /dev/null ; then
|
||||
wget -q "${1}" -O "${2}" 2>/dev/null
|
||||
return
|
||||
fi
|
||||
|
||||
echo >&2 'error: no curl or wget found'
|
||||
exit 1
|
||||
}
|
||||
|
||||
# _do_curls "file" "url" ["url"...]
|
||||
_do_curls() {
|
||||
f="${1}"
|
||||
shift
|
||||
[[ ! -s "${f}" ]] || return 0
|
||||
for url in "${@}" ; do
|
||||
if _do_curl "${url}" "${f}" ; then
|
||||
return
|
||||
fi
|
||||
done
|
||||
rm -f "${f}"
|
||||
return 1
|
||||
}
|
||||
|
||||
# _binary "version" "file.tar.gz" "arch"
|
||||
_binary() {
|
||||
local version=${1}
|
||||
local file=${2}
|
||||
local arch=${3}
|
||||
urls=(
|
||||
"https://storage.googleapis.com/golang/go${version}.${GIMME_OS}-${arch}.tar.gz"
|
||||
"https://go.googlecode.com/files/go${version}.${GIMME_OS}-${arch}.tar.gz"
|
||||
"https://go.googlecode.com/files/go.${version}.${GIMME_OS}-${arch}.tar.gz"
|
||||
)
|
||||
if [ "${GIMME_OS}" = 'darwin' -a "${GIMME_BINARY_OSX}" ] ; then
|
||||
urls=(
|
||||
"https://storage.googleapis.com/golang/go${version}.${GIMME_OS}-${arch}-${GIMME_BINARY_OSX}.tar.gz"
|
||||
"${urls[@]}"
|
||||
)
|
||||
fi
|
||||
if [ "${arch}" = 'arm' ] ; then
|
||||
# attempt "armv6l" vs just "arm" first (since that's what's officially published)
|
||||
urls=(
|
||||
"https://storage.googleapis.com/golang/go${version}.${GIMME_OS}-${arch}v6l.tar.gz" # go1.6beta2 & go1.6rc1
|
||||
"https://storage.googleapis.com/golang/go${version}.${GIMME_OS}-${arch}6.tar.gz" # go1.6beta1
|
||||
"${urls[@]}"
|
||||
)
|
||||
fi
|
||||
_do_curls "${file}" "${urls[@]}"
|
||||
}
|
||||
|
||||
# _source "version" "file.src.tar.gz"
|
||||
_source() {
|
||||
urls=(
|
||||
"https://storage.googleapis.com/golang/go${1}.src.tar.gz"
|
||||
"https://go.googlecode.com/files/go${1}.src.tar.gz"
|
||||
"https://go.googlecode.com/files/go.${1}.src.tar.gz"
|
||||
)
|
||||
_do_curls "${2}" "${urls[@]}"
|
||||
}
|
||||
|
||||
# _fetch "dir"
|
||||
_fetch() {
|
||||
mkdir -p "$(dirname "${1}")"
|
||||
|
||||
if [[ -d "${1}/.git" ]] ; then
|
||||
(
|
||||
cd "${1}"
|
||||
git remote set-url origin "${GIMME_GO_GIT_REMOTE}"
|
||||
git fetch -q --all && git fetch -q --tags
|
||||
)
|
||||
return
|
||||
fi
|
||||
|
||||
git clone -q "${GIMME_GO_GIT_REMOTE}" "${1}"
|
||||
}
|
||||
|
||||
# _checkout "version" "dir"
|
||||
_checkout() {
|
||||
_fetch "${2}"
|
||||
( cd "${2}" && {
|
||||
git reset -q --hard "origin/${1}" \
|
||||
|| git reset -q --hard "origin/go${1}" \
|
||||
|| { [ "${1}" = 'tip' ] && git reset -q --hard origin/master ; } \
|
||||
|| git reset -q --hard "refs/tags/${1}" \
|
||||
|| git reset -q --hard "refs/tags/go${1}"
|
||||
} 2>/dev/null )
|
||||
}
|
||||
|
||||
# _extract "file.tar.gz" "dir"
|
||||
_extract() {
|
||||
mkdir -p "${2}"
|
||||
tar -xf "${1}" -C "${2}" --strip-components 1
|
||||
}
|
||||
|
||||
# _setup_bootstrap
|
||||
_setup_bootstrap() {
|
||||
local versions=("1.6" "1.5" "1.4")
|
||||
|
||||
# try existing
|
||||
for v in "${versions[@]}" ; do
|
||||
for candidate in "${GIMME_ENV_PREFIX}/go${v}"*".env" ; do
|
||||
if [ -s "$candidate" ]; then
|
||||
export GOROOT_BOOTSTRAP="$(source "${candidate}" 2>/dev/null && go env GOROOT)"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
# try binary
|
||||
for v in "${versions[@]}" ; do
|
||||
if [ -n "$(_try_binary ${v} "${GIMME_HOSTARCH}")" ]; then
|
||||
export GOROOT_BOOTSTRAP="${GIMME_VERSION_PREFIX}/go${v}.${GIMME_OS}.${GIMME_HOSTARCH}"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
|
||||
echo >&2 "Unable to setup go bootstrap from existing or binary";
|
||||
return 1;
|
||||
}
|
||||
|
||||
# _compile "dir"
|
||||
_compile() {
|
||||
(
|
||||
if grep -q GOROOT_BOOTSTRAP "${1}/src/make.bash" &> /dev/null; then
|
||||
_setup_bootstrap || return 1
|
||||
fi
|
||||
cd "${1}"
|
||||
if [[ -d .git ]] ; then
|
||||
git clean -dfx -q
|
||||
fi
|
||||
cd src
|
||||
export GOOS="${GIMME_OS}" GOARCH="${GIMME_ARCH}"
|
||||
export CGO_ENABLED="${GIMME_CGO_ENABLED}"
|
||||
export CC_FOR_TARGET="${GIMME_CC_FOR_TARGET}"
|
||||
|
||||
if [ "${GIMME_DEBUG}" = "2" ]; then
|
||||
./make.bash 1>&2 || return 1
|
||||
else
|
||||
local make_log="${1}/make.${GOOS}.${GOARCH}.log"
|
||||
./make.bash &> $make_log || return 1
|
||||
fi
|
||||
)
|
||||
}
|
||||
|
||||
_can_compile() {
|
||||
cat > "${GIMME_TMP}/test.go" <<'EOF'
|
||||
package main
|
||||
import "os"
|
||||
func main() {
|
||||
os.Exit(0)
|
||||
}
|
||||
EOF
|
||||
"${1}/bin/go" run "${GIMME_TMP}/test.go"
|
||||
}
|
||||
|
||||
# _env "dir"
|
||||
_env() {
|
||||
[ -d "${1}/bin" -a -x "${1}/bin/go" ] || return 1
|
||||
|
||||
# if we try to run a Darwin binary on Linux, we need to fail so 'auto' can fallback to cross-compiling from source
|
||||
# automatically
|
||||
GOROOT="${1}" "${1}/bin/go" version &> /dev/null || return 1
|
||||
|
||||
# https://twitter.com/davecheney/status/431581286918934528
|
||||
# we have to GOROOT sometimes because we use official release binaries in unofficial locations :(
|
||||
|
||||
echo
|
||||
if [[ "$(GOROOT="${1}" "${1}/bin/go" env GOHOSTOS)" = "${GIMME_OS}" ]] ; then
|
||||
echo 'unset GOOS'
|
||||
else
|
||||
echo 'export GOOS="'"${GIMME_OS}"'"'
|
||||
fi
|
||||
if [[ "$(GOROOT="${1}" "${1}/bin/go" env GOHOSTARCH)" = "${GIMME_ARCH}" ]] ; then
|
||||
echo 'unset GOARCH'
|
||||
else
|
||||
echo 'export GOARCH="'"${GIMME_ARCH}"'"'
|
||||
fi
|
||||
if ! _can_compile "${1}" >/dev/null 2>&1 ; then
|
||||
# if the compile test fails without GOROOT, then we probably need GOROOT
|
||||
echo 'export GOROOT="'"${1}"'"'
|
||||
else
|
||||
echo 'unset GOROOT'
|
||||
fi
|
||||
echo 'export PATH="'"${1}/bin"':${PATH}"'
|
||||
if [[ -z "${GIMME_SILENT_ENV}" ]] ; then
|
||||
echo 'go version >&2'
|
||||
fi
|
||||
echo
|
||||
}
|
||||
|
||||
# _env_alias "dir" "env-file"
|
||||
_env_alias() {
|
||||
if [[ "${GIMME_NO_ENV_ALIAS}" ]] ; then
|
||||
echo "${2}"
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ "$(GOROOT="${1}" "${1}/bin/go" env GOHOSTOS)" = "${GIMME_OS}" && \
|
||||
"$(GOROOT="${1}" "${1}/bin/go" env GOHOSTARCH)" = "${GIMME_ARCH}" ]] ; then
|
||||
local dest="${GIMME_ENV_PREFIX}/go${GIMME_GO_VERSION}.env"
|
||||
cp "${2}" "${dest}"
|
||||
ln -sf "${dest}" "${GIMME_ENV_PREFIX}/latest.env"
|
||||
echo "${dest}"
|
||||
else
|
||||
echo "${2}"
|
||||
fi
|
||||
}
|
||||
|
||||
_try_existing() {
|
||||
local existing_ver="${GIMME_VERSION_PREFIX}/go${GIMME_GO_VERSION}.${GIMME_OS}.${GIMME_ARCH}"
|
||||
local existing_env="${GIMME_ENV_PREFIX}/go${GIMME_GO_VERSION}.${GIMME_OS}.${GIMME_ARCH}.env"
|
||||
|
||||
if [[ -x "${existing_ver}/bin/go" && -s "${existing_env}" ]] ; then
|
||||
cat "${existing_env}"
|
||||
return
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# _try_binary "version" "arch"
|
||||
_try_binary() {
|
||||
local version=${1}
|
||||
local arch=${2}
|
||||
local bin_tgz="${GIMME_TMP}/go${version}.${GIMME_OS}.${arch}.tar.gz"
|
||||
local bin_dir="${GIMME_VERSION_PREFIX}/go${version}.${GIMME_OS}.${arch}"
|
||||
local bin_env="${GIMME_ENV_PREFIX}/go${version}.${GIMME_OS}.${arch}.env"
|
||||
|
||||
_binary "${version}" "${bin_tgz}" "${arch}" || return 1
|
||||
_extract "${bin_tgz}" "${bin_dir}" || return 1
|
||||
_env "${bin_dir}" | tee "${bin_env}" || return 1
|
||||
echo "export GIMME_ENV=\"$(_env_alias "${bin_dir}" "${bin_env}")\""
|
||||
}
|
||||
|
||||
_try_source() {
|
||||
local src_tgz="${GIMME_TMP}/go${GIMME_GO_VERSION}.src.tar.gz"
|
||||
local src_dir="${GIMME_VERSION_PREFIX}/go${GIMME_GO_VERSION}.src"
|
||||
local src_env="${GIMME_ENV_PREFIX}/go${GIMME_GO_VERSION}.${GIMME_OS}.${GIMME_ARCH}.env"
|
||||
|
||||
_source "${GIMME_GO_VERSION}" "${src_tgz}" || return 1
|
||||
_extract "${src_tgz}" "${src_dir}" || return 1
|
||||
_compile "${src_dir}" || return 1
|
||||
_env "${src_dir}" | tee "${src_env}" || return 1
|
||||
echo "export GIMME_ENV=\"$(_env_alias "${src_dir}" "${src_env}")\""
|
||||
}
|
||||
|
||||
_try_git() {
|
||||
local git_dir="${GIMME_VERSION_PREFIX}/go"
|
||||
local git_env="${GIMME_ENV_PREFIX}/go.git.${GIMME_OS}.${GIMME_ARCH}.env"
|
||||
|
||||
_checkout "${GIMME_GO_VERSION}" "${git_dir}" || return 1
|
||||
_compile "${git_dir}" || return 1
|
||||
_env "${git_dir}" | tee "${git_env}" || return 1
|
||||
echo "export GIMME_ENV=\"$(_env_alias "${git_dir}" "${git_env}")\""
|
||||
}
|
||||
|
||||
_wipe_version() {
|
||||
local env_file="${GIMME_ENV_PREFIX}/go${1}.${GIMME_OS}.${GIMME_ARCH}.env"
|
||||
|
||||
if [[ -s "${env_file}" ]] ; then
|
||||
rm -rf "$(awk -F\" '/GOROOT/ { print $2 }' "${env_file}")"
|
||||
rm -f "${env_file}"
|
||||
fi
|
||||
}
|
||||
|
||||
_list_versions() {
|
||||
if [ ! -d "${GIMME_VERSION_PREFIX}" ] ; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
local current_version="$(go env GOROOT 2>/dev/null)"
|
||||
current_version="${current_version##*/go}"
|
||||
current_version="${current_version%%.${GIMME_OS}.*}"
|
||||
|
||||
for d in "${GIMME_VERSION_PREFIX}/go"*".${GIMME_OS}."* ; do
|
||||
local cleaned="${d##*/go}"
|
||||
cleaned="${cleaned%%.${GIMME_OS}.*}"
|
||||
echo -en "${cleaned}"
|
||||
if [[ $cleaned = $current_version ]] ; then
|
||||
echo -en >&2 ' <= current'
|
||||
fi
|
||||
echo
|
||||
done
|
||||
}
|
||||
|
||||
_realpath() {
|
||||
[ -d "$1" ] && echo "$(cd "$1" && pwd)" || echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
|
||||
}
|
||||
|
||||
_assert_version_given() {
|
||||
if [[ -z "${GIMME_GO_VERSION}" ]] ; then
|
||||
echo >&2 'error: no GIMME_GO_VERSION supplied'
|
||||
echo >&2 " ex: GIMME_GO_VERSION=1.4.1 ${0} ${@}"
|
||||
echo >&2 " ex: ${0} 1.4.1 ${@}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
: ${GIMME_OS:=$(uname -s | tr '[:upper:]' '[:lower:]')}
|
||||
: ${GIMME_HOSTOS:=$(uname -s | tr '[:upper:]' '[:lower:]')}
|
||||
: ${GIMME_ARCH:=$(uname -m)}
|
||||
: ${GIMME_HOSTARCH:=$(uname -m)}
|
||||
: ${GIMME_ENV_PREFIX:=${HOME}/.gimme/envs}
|
||||
: ${GIMME_VERSION_PREFIX:=${HOME}/.gimme/versions}
|
||||
: ${GIMME_TMP:=${TMPDIR:-/tmp}/gimme}
|
||||
: ${GIMME_GO_GIT_REMOTE:=https://github.com/golang/go.git}
|
||||
: ${GIMME_TYPE:=auto} # 'auto', 'binary', 'source', or 'git'
|
||||
: ${GIMME_BINARY_OSX:=osx10.8}
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "${1}" in
|
||||
-h|--help|help|wat)
|
||||
_old_ifs="$IFS"
|
||||
IFS=';'
|
||||
awk '/^#\+ / {
|
||||
sub(/^#\+ /, "", $0) ;
|
||||
sub(/-$/, "", $0) ;
|
||||
print $0
|
||||
}' "$0" | while read line ; do
|
||||
eval "echo \"$line\""
|
||||
done
|
||||
IFS="$_old_ifs"
|
||||
exit 0
|
||||
;;
|
||||
-V|--version|version)
|
||||
echo "${GIMME_VERSION}"
|
||||
exit 0
|
||||
;;
|
||||
-l|--list|list)
|
||||
_list_versions
|
||||
exit 0
|
||||
;;
|
||||
-f|--force|force)
|
||||
force=1
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if [[ -n "${1}" ]] ; then
|
||||
GIMME_GO_VERSION="${1}"
|
||||
fi
|
||||
if [[ -n "${2}" ]] ; then
|
||||
GIMME_VERSION_PREFIX="${2}"
|
||||
fi
|
||||
|
||||
case "${GIMME_ARCH}" in
|
||||
x86_64) GIMME_ARCH=amd64 ;;
|
||||
x86) GIMME_ARCH=386 ;;
|
||||
arm64)
|
||||
if [[ "${GIMME_GO_VERSION}" < "1.5" ]]; then
|
||||
echo >&2 "error: ${GIMME_ARCH} is not supported by this go version"
|
||||
echo >&2 "try go1.5 or newer"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "${GIMME_HOSTOS}" = "linux" && "${GIMME_HOSTARCH}" != "${GIMME_ARCH}" ]]; then
|
||||
: ${GIMME_CC_FOR_TARGET:="aarch64-linux-gnu-gcc"}
|
||||
fi
|
||||
;;
|
||||
arm*) GIMME_ARCH=arm ;;
|
||||
esac
|
||||
|
||||
case "${GIMME_HOSTARCH}" in
|
||||
x86_64) GIMME_HOSTARCH=amd64 ;;
|
||||
x86) GIMME_HOSTARCH=386 ;;
|
||||
arm64) ;;
|
||||
arm*) GIMME_HOSTARCH=arm ;;
|
||||
esac
|
||||
|
||||
_assert_version_given "$@"
|
||||
|
||||
[ ${force} ] && _wipe_version "${GIMME_GO_VERSION}"
|
||||
|
||||
unset GOARCH
|
||||
unset GOBIN
|
||||
unset GOOS
|
||||
unset GOPATH
|
||||
unset GOROOT
|
||||
unset CGO_ENABLED
|
||||
unset CC_FOR_TARGET
|
||||
|
||||
mkdir -p "${GIMME_VERSION_PREFIX}" "${GIMME_ENV_PREFIX}"
|
||||
|
||||
GIMME_VERSION_PREFIX="$(_realpath "${GIMME_VERSION_PREFIX}")"
|
||||
GIMME_ENV_PREFIX="$(_realpath "${GIMME_ENV_PREFIX}")"
|
||||
|
||||
if ! case "${GIMME_TYPE}" in
|
||||
binary) _try_existing || _try_binary "${GIMME_GO_VERSION}" "${GIMME_ARCH}" ;;
|
||||
source) _try_source || _try_git ;;
|
||||
git) _try_git ;;
|
||||
auto) _try_existing || _try_binary "${GIMME_GO_VERSION}" "${GIMME_ARCH}" || _try_source || _try_git ;;
|
||||
*)
|
||||
echo >&2 "I don't know how to '${GIMME_TYPE}'."
|
||||
echo >&2 " Try 'auto', 'binary', 'source', or 'git'."
|
||||
exit 1
|
||||
;;
|
||||
esac ; then
|
||||
echo >&2 "I don't have any idea what to do with '${GIMME_GO_VERSION}'."
|
||||
echo >&2 " (using type '${GIMME_TYPE}')"
|
||||
exit 1
|
||||
fi
|
||||
vendor/github.com/coreos/ignition/internal/exec/engine.go (generated, vendored): 52 lines changed
@@ -22,11 +22,14 @@ import (
|
||||
|
||||
"github.com/coreos/ignition/config"
|
||||
"github.com/coreos/ignition/config/types"
|
||||
"github.com/coreos/ignition/config/validate/report"
|
||||
"github.com/coreos/ignition/internal/exec/stages"
|
||||
"github.com/coreos/ignition/internal/exec/util"
|
||||
"github.com/coreos/ignition/internal/log"
|
||||
"github.com/coreos/ignition/internal/providers"
|
||||
putil "github.com/coreos/ignition/internal/providers/util"
|
||||
"github.com/coreos/ignition/internal/util"
|
||||
"github.com/coreos/ignition/internal/resource"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -48,17 +51,20 @@ var (
|
||||
// Engine represents the entity that fetches and executes a configuration.
|
||||
type Engine struct {
|
||||
ConfigCache string
|
||||
OnlineTimeout time.Duration
|
||||
Logger *log.Logger
|
||||
Root string
|
||||
Provider providers.Provider
|
||||
FetchFunc providers.FuncFetchConfig
|
||||
OemBaseConfig types.Config
|
||||
DefaultUserConfig types.Config
|
||||
|
||||
client resource.HttpClient
|
||||
}
|
||||
|
||||
// Run executes the stage of the given name. It returns true if the stage
|
||||
// successfully ran and false if there were any errors.
|
||||
func (e Engine) Run(stageName string) bool {
|
||||
e.client = resource.NewHttpClient(e.Logger)
|
||||
|
||||
cfg, err := e.acquireConfig()
|
||||
switch err {
|
||||
case nil:
|
||||
@@ -72,7 +78,7 @@ func (e Engine) Run(stageName string) bool {
|
||||
|
||||
e.Logger.PushPrefix(stageName)
|
||||
defer e.Logger.PopPrefix()
|
||||
return stages.Get(stageName).Create(e.Logger, e.Root).Run(config.Append(baseConfig, config.Append(e.OemBaseConfig, cfg)))
|
||||
return stages.Get(stageName).Create(e.Logger, &e.client, e.Root).Run(config.Append(baseConfig, config.Append(e.OemBaseConfig, cfg)))
|
||||
}
|
||||
|
||||
// acquireConfig returns the configuration, first checking a local cache
|
||||
@@ -93,7 +99,6 @@ func (e Engine) acquireConfig() (cfg types.Config, err error) {
|
||||
e.Logger.Crit("failed to fetch config: %s", err)
|
||||
return
|
||||
}
|
||||
e.Logger.Debug("fetched config: %+v", cfg)
|
||||
|
||||
// Populate the config cache.
|
||||
b, err = json.Marshal(cfg)
|
||||
@@ -113,20 +118,13 @@ func (e Engine) acquireConfig() (cfg types.Config, err error) {
|
||||
// returning an error if the provider is unavailable. This will also render the
|
||||
// config (see renderConfig) before returning.
|
||||
func (e Engine) fetchProviderConfig() (types.Config, error) {
|
||||
if err := putil.WaitUntilOnline(e.Provider, e.OnlineTimeout); err != nil {
|
||||
cfg, r, err := e.FetchFunc(e.Logger, &e.client)
|
||||
e.logReport(r)
|
||||
if err != nil {
|
||||
return types.Config{}, err
|
||||
}
|
||||
|
||||
cfg, err := e.Provider.FetchConfig()
|
||||
switch err {
|
||||
case config.ErrDeprecated:
|
||||
e.Logger.Warning("%v: the provided config format is deprecated and will not be supported in the future", err)
|
||||
fallthrough
|
||||
case nil:
|
||||
return e.renderConfig(cfg)
|
||||
default:
|
||||
return types.Config{}, err
|
||||
}
|
||||
return e.renderConfig(cfg)
|
||||
}
|
||||
|
||||
// renderConfig evaluates "ignition.config.replace" and "ignition.config.append"
|
||||
@@ -155,7 +153,7 @@ func (e Engine) renderConfig(cfg types.Config) (types.Config, error) {
|
||||
// fetchReferencedConfig fetches, renders, and attempts to verify the requested
|
||||
// config.
|
||||
func (e Engine) fetchReferencedConfig(cfgRef types.ConfigReference) (types.Config, error) {
|
||||
rawCfg, err := util.FetchResource(e.Logger, url.URL(cfgRef.Source))
|
||||
rawCfg, err := resource.Fetch(e.Logger, &e.client, context.Background(), url.URL(cfgRef.Source))
|
||||
if err != nil {
|
||||
return types.Config{}, err
|
||||
}
|
||||
@@ -164,10 +162,26 @@ func (e Engine) fetchReferencedConfig(cfgRef types.ConfigReference) (types.Confi
|
||||
return types.Config{}, err
|
||||
}
|
||||
|
||||
cfg, err := config.Parse(rawCfg)
|
||||
cfg, r, err := config.Parse(rawCfg)
|
||||
e.logReport(r)
|
||||
if err != nil {
|
||||
return types.Config{}, err
|
||||
}
|
||||
|
||||
return e.renderConfig(cfg)
|
||||
}
|
||||
|
||||
func (e Engine) logReport(r report.Report) {
|
||||
for _, entry := range r.Entries {
|
||||
switch entry.Kind {
|
||||
case report.EntryError:
|
||||
e.Logger.Crit("%v", entry)
|
||||
case report.EntryWarning:
|
||||
e.Logger.Warning("%v", entry)
|
||||
case report.EntryDeprecated:
|
||||
e.Logger.Warning("%v: the provided config format is deprecated and will not be supported in the future.", entry)
|
||||
case report.EntryInfo:
|
||||
e.Logger.Info("%v", entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vendor/github.com/coreos/ignition/internal/exec/engine_test.go (generated, vendored): 98 lines changed
@@ -1,98 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package exec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/ignition/config/types"
|
||||
"github.com/coreos/ignition/internal/providers"
|
||||
)
|
||||
|
||||
type mockProvider struct {
|
||||
config types.Config
|
||||
err error
|
||||
online bool
|
||||
retry bool
|
||||
backoff time.Duration
|
||||
}
|
||||
|
||||
func (p mockProvider) FetchConfig() (types.Config, error) { return p.config, p.err }
|
||||
func (p mockProvider) IsOnline() bool { return p.online }
|
||||
func (p mockProvider) ShouldRetry() bool { return p.retry }
|
||||
func (p mockProvider) BackoffDuration() time.Duration { return p.backoff }
|
||||
|
||||
// TODO
|
||||
func TestRun(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFetchConfigs(t *testing.T) {
|
||||
type in struct {
|
||||
provider mockProvider
|
||||
timeout time.Duration
|
||||
}
|
||||
type out struct {
|
||||
config types.Config
|
||||
err error
|
||||
}
|
||||
|
||||
online := mockProvider{
|
||||
online: true,
|
||||
config: types.Config{
|
||||
Systemd: types.Systemd{
|
||||
Units: []types.SystemdUnit{},
|
||||
},
|
||||
},
|
||||
}
|
||||
error := mockProvider{
|
||||
online: true,
|
||||
err: errors.New("test error"),
|
||||
}
|
||||
offline := mockProvider{online: false}
|
||||
|
||||
tests := []struct {
|
||||
in in
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: in{provider: online, timeout: time.Second},
|
||||
out: out{config: online.config},
|
||||
},
|
||||
{
|
||||
in: in{provider: error, timeout: time.Second},
|
||||
out: out{config: types.Config{}, err: error.err},
|
||||
},
|
||||
{
|
||||
in: in{provider: offline, timeout: time.Second},
|
||||
out: out{config: types.Config{}, err: providers.ErrNoProvider},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
config, err := Engine{
|
||||
Provider: test.in.provider,
|
||||
OnlineTimeout: test.in.timeout,
|
||||
}.fetchProviderConfig()
|
||||
if !reflect.DeepEqual(test.out.config, config) {
|
||||
t.Errorf("#%d: bad provider: want %+v, got %+v", i, test.out.config, config)
|
||||
}
|
||||
if test.out.err != err {
|
||||
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
vendor/github.com/coreos/ignition/internal/exec/stages/disks/disks.go (generated, vendored): 66 lines changed
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/coreos/ignition/internal/exec/stages"
|
||||
"github.com/coreos/ignition/internal/exec/util"
|
||||
"github.com/coreos/ignition/internal/log"
|
||||
"github.com/coreos/ignition/internal/resource"
|
||||
"github.com/coreos/ignition/internal/sgdisk"
|
||||
"github.com/coreos/ignition/internal/systemd"
|
||||
)
|
||||
@@ -40,11 +41,14 @@ func init() {
|
||||
|
||||
type creator struct{}
|
||||
|
||||
func (creator) Create(logger *log.Logger, root string) stages.Stage {
|
||||
return &stage{util.Util{
|
||||
DestDir: root,
|
||||
Logger: logger,
|
||||
}}
|
||||
func (creator) Create(logger *log.Logger, client *resource.HttpClient, root string) stages.Stage {
|
||||
return &stage{
|
||||
Util: util.Util{
|
||||
DestDir: root,
|
||||
Logger: logger,
|
||||
},
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (creator) Name() string {
|
||||
@@ -53,6 +57,8 @@ func (creator) Name() string {
|
||||
|
||||
type stage struct {
|
||||
util.Util
|
||||
|
||||
client *resource.HttpClient
|
||||
}
|
||||
|
||||
func (stage) Name() string {
|
||||
@@ -87,6 +93,33 @@ func (s stage) waitOnDevices(devs []string, ctxt string) error {
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to wait on %s devs: %v", ctxt, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createDeviceAliases creates device aliases for every device in devs.
|
||||
func (s stage) createDeviceAliases(devs []string) error {
|
||||
for _, dev := range devs {
|
||||
target, err := util.CreateDeviceAlias(dev)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create device alias for %q: %v", dev, err)
|
||||
}
|
||||
s.Logger.Info("created device alias for %q: %q -> %q", dev, util.DeviceAlias(dev), target)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitOnDevicesAndCreateAliases simply wraps waitOnDevices and createDeviceAliases.
|
||||
func (s stage) waitOnDevicesAndCreateAliases(devs []string, ctxt string) error {
|
||||
if err := s.waitOnDevices(devs, ctxt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.createDeviceAliases(devs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -103,15 +136,17 @@ func (s stage) createPartitions(config types.Config) error {
|
||||
devs = append(devs, string(disk.Device))
|
||||
}
|
||||
|
||||
if err := s.waitOnDevices(devs, "disks"); err != nil {
|
||||
if err := s.waitOnDevicesAndCreateAliases(devs, "disks"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, dev := range config.Storage.Disks {
|
||||
devAlias := util.DeviceAlias(string(dev.Device))
|
||||
|
||||
err := s.Logger.LogOp(func() error {
|
||||
op := sgdisk.Begin(s.Logger, string(dev.Device))
|
||||
op := sgdisk.Begin(s.Logger, devAlias)
|
||||
if dev.WipeTable {
|
||||
s.Logger.Info("wiping partition table requested on %q", dev.Device)
|
||||
s.Logger.Info("wiping partition table requested on %q", devAlias)
|
||||
op.WipeTable(true)
|
||||
}
|
||||
|
||||
@@ -129,7 +164,7 @@ func (s stage) createPartitions(config types.Config) error {
|
||||
return fmt.Errorf("commit failure: %v", err)
|
||||
}
|
||||
return nil
|
||||
}, "partitioning %q", dev.Device)
|
||||
}, "partitioning %q", devAlias)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -153,7 +188,7 @@ func (s stage) createRaids(config types.Config) error {
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.waitOnDevices(devs, "raids"); err != nil {
|
||||
if err := s.waitOnDevicesAndCreateAliases(devs, "raids"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -173,7 +208,7 @@ func (s stage) createRaids(config types.Config) error {
|
||||
}
|
||||
|
||||
for _, dev := range md.Devices {
|
||||
args = append(args, string(dev))
|
||||
args = append(args, util.DeviceAlias(string(dev)))
|
||||
}
|
||||
|
||||
if err := s.Logger.LogCmd(
|
||||
@@ -207,7 +242,7 @@ func (s stage) createFilesystems(config types.Config) error {
|
||||
devs = append(devs, string(fs.Device))
|
||||
}
|
||||
|
||||
if err := s.waitOnDevices(devs, "filesystems"); err != nil {
|
||||
if err := s.waitOnDevicesAndCreateAliases(devs, "filesystems"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -248,13 +283,14 @@ func (s stage) createFilesystem(fs types.FilesystemMount) error {
|
||||
return fmt.Errorf("unsupported filesystem format: %q", fs.Format)
|
||||
}
|
||||
|
||||
args = append(args, string(fs.Device))
|
||||
devAlias := util.DeviceAlias(string(fs.Device))
|
||||
args = append(args, devAlias)
|
||||
if err := s.Logger.LogCmd(
|
||||
exec.Command(mkfs, args...),
|
||||
"creating %q filesystem on %q",
|
||||
fs.Format, string(fs.Device),
|
||||
fs.Format, devAlias,
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to run %q: %v %v", mkfs, err, args)
|
||||
return fmt.Errorf("mkfs failed: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
vendor/github.com/coreos/ignition/internal/exec/stages/files/files.go (generated, vendored): 24 lines changed
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/coreos/ignition/internal/exec/stages"
|
||||
"github.com/coreos/ignition/internal/exec/util"
|
||||
"github.com/coreos/ignition/internal/log"
|
||||
"github.com/coreos/ignition/internal/resource"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -41,11 +42,14 @@ func init() {
|
||||
|
||||
type creator struct{}
|
||||
|
||||
func (creator) Create(logger *log.Logger, root string) stages.Stage {
|
||||
return &stage{util.Util{
|
||||
DestDir: root,
|
||||
Logger: logger,
|
||||
}}
|
||||
func (creator) Create(logger *log.Logger, client *resource.HttpClient, root string) stages.Stage {
|
||||
return &stage{
|
||||
Util: util.Util{
|
||||
DestDir: root,
|
||||
Logger: logger,
|
||||
},
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (creator) Name() string {
|
||||
@@ -54,6 +58,8 @@ func (creator) Name() string {
|
||||
|
||||
type stage struct {
|
||||
util.Util
|
||||
|
||||
client *resource.HttpClient
|
||||
}
|
||||
|
||||
func (stage) Name() string {
|
||||
@@ -164,7 +170,7 @@ func (s stage) createFiles(fs types.Filesystem, files []types.File) error {
|
||||
DestDir: mnt,
|
||||
}
|
||||
for _, f := range files {
|
||||
file := util.RenderFile(s.Logger, f)
|
||||
file := util.RenderFile(s.Logger, s.client, f)
|
||||
if file == nil {
|
||||
return fmt.Errorf("failed to resolve file %q", f.Path)
|
||||
}
|
||||
@@ -224,7 +230,7 @@ func (s stage) writeSystemdUnit(unit types.SystemdUnit) error {
|
||||
f := util.FileFromUnitDropin(unit, dropin)
|
||||
if err := s.Logger.LogOp(
|
||||
func() error { return s.WriteFile(f) },
|
||||
"writing dropin %q at %q", dropin.Name, f.Path,
|
||||
"writing drop-in %q at %q", dropin.Name, f.Path,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -243,7 +249,7 @@ func (s stage) writeSystemdUnit(unit types.SystemdUnit) error {
|
||||
}
|
||||
|
||||
return nil
|
||||
}, "writing unit %q", unit.Name)
|
||||
}, "processing unit %q", unit.Name)
|
||||
}
|
||||
|
||||
// writeNetworkdUnit creates the specified unit. If the contents of the unit or
|
||||
@@ -263,7 +269,7 @@ func (s stage) writeNetworkdUnit(unit types.NetworkdUnit) error {
|
||||
}
|
||||
|
||||
return nil
|
||||
}, "writing unit %q", unit.Name)
|
||||
}, "processing unit %q", unit.Name)
|
||||
}
|
||||
|
||||
// createPasswd creates the users and groups as described in config.Passwd.
|
||||
|
||||
vendor/github.com/coreos/ignition/internal/exec/stages/stages.go (generated, vendored): 3 lines changed
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/coreos/ignition/config/types"
|
||||
"github.com/coreos/ignition/internal/log"
|
||||
"github.com/coreos/ignition/internal/registry"
|
||||
"github.com/coreos/ignition/internal/resource"
|
||||
)
|
||||
|
||||
// Stage is responsible for actually executing a stage of the configuration.
|
||||
@@ -29,7 +30,7 @@ type Stage interface {
|
||||
// StageCreator is responsible for instantiating a particular stage given a
|
||||
// logger and root path under the root partition.
|
||||
type StageCreator interface {
|
||||
Create(logger *log.Logger, root string) Stage
|
||||
Create(logger *log.Logger, client *resource.HttpClient, root string) Stage
|
||||
Name() string
|
||||
}
|
||||
|
||||
|
||||
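The two hunks above widen the stage constructor so a shared HTTP client is injected alongside the logger and root path. A rough self-contained sketch of that constructor-injection shape follows; the logger, client, and stage types are stdlib stand-ins, not the vendored Ignition types.

// Stand-in sketch of the Create(logger, client, root) shape; not the vendored code.
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"
)

// stage carries everything it needs up front: destination root, logger,
// and the shared HTTP client used for fetching file contents.
type stage struct {
	destDir string
	logger  *log.Logger
	client  *http.Client
}

// create mirrors the widened constructor: the caller supplies the client
// once, and every stage built from it reuses the same connection pool.
func create(logger *log.Logger, client *http.Client, root string) *stage {
	return &stage{destDir: root, logger: logger, client: client}
}

func main() {
	logger := log.New(os.Stderr, "files ", log.LstdFlags)
	s := create(logger, http.DefaultClient, "/sysroot")
	fmt.Println("stage rooted at", s.destDir)
}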
55
vendor/github.com/coreos/ignition/internal/exec/util/device_alias.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package util

import (
"os"
"path/filepath"
)

const deviceAliasDir = "/dev_aliases"

// DeviceAlias returns the aliased form of the supplied path.
// Note device paths in ignition are always absolute.
func DeviceAlias(path string) string {
return filepath.Join(deviceAliasDir, filepath.Clean(path))
}

// CreateDeviceAlias creates a device alias for the supplied path.
// On success the canonicalized path used as the alias target is returned.
func CreateDeviceAlias(path string) (string, error) {
target, err := filepath.EvalSymlinks(path)
if err != nil {
return "", err
}

alias := DeviceAlias(path)

if err := os.Remove(alias); err != nil {
if !os.IsNotExist(err) {
return "", err
}

if err = os.MkdirAll(filepath.Dir(alias), 0750); err != nil {
return "", err
}
}

if err = os.Symlink(target, alias); err != nil {
return "", err
}

return target, nil
}
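As a usage illustration for the new device_alias.go: the alias is simply a symlink under a fixed directory pointing at the canonical device node, so later steps can address a stable path even when the configured device path is itself a symlink. The sketch below is not the vendored code; it runs against a temporary directory instead of /dev_aliases so it needs no privileges, and the file names are made up.

// Sketch of the symlink-alias idea from device_alias.go, run against a
// temporary directory instead of /dev_aliases.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func createAlias(aliasDir, path string) (string, error) {
	// Resolve the real target, as CreateDeviceAlias does with EvalSymlinks.
	target, err := filepath.EvalSymlinks(path)
	if err != nil {
		return "", err
	}
	alias := filepath.Join(aliasDir, filepath.Clean(path))
	if err := os.MkdirAll(filepath.Dir(alias), 0750); err != nil {
		return "", err
	}
	// Replace any stale alias, then point the new one at the resolved target.
	os.Remove(alias)
	if err := os.Symlink(target, alias); err != nil {
		return "", err
	}
	return target, nil
}

func main() {
	dir, _ := ioutil.TempDir("", "dev_aliases")
	defer os.RemoveAll(dir)

	// Stand-in "device": a plain file reached through a symlink.
	real := filepath.Join(dir, "sda1")
	ioutil.WriteFile(real, nil, 0600)
	link := filepath.Join(dir, "by-label-ROOT")
	os.Symlink(real, link)

	target, err := createAlias(filepath.Join(dir, "aliases"), link)
	fmt.Println(target, err)
}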
163
vendor/github.com/coreos/ignition/internal/exec/util/file.go
generated
vendored
@@ -15,8 +15,11 @@
package util

import (
"bytes"
"bufio"
"compress/gzip"
"encoding/hex"
"hash"
"io"
"io/ioutil"
"net/url"
"os"
@@ -24,7 +27,9 @@ import (

"github.com/coreos/ignition/config/types"
"github.com/coreos/ignition/internal/log"
"github.com/coreos/ignition/internal/util"
"github.com/coreos/ignition/internal/resource"

"golang.org/x/net/context"
)

const (
@@ -33,69 +38,127 @@ const (
)

type File struct {
Path types.Path
Contents []byte
Mode os.FileMode
Uid int
Gid int
io.ReadCloser
hash.Hash
Path types.Path
Mode os.FileMode
Uid int
Gid int
expectedSum string
}

func RenderFile(l *log.Logger, f types.File) *File {
var contents []byte
func (f File) Verify() error {
if f.Hash == nil {
return nil
}
sum := f.Sum(nil)
encodedSum := make([]byte, hex.EncodedLen(len(sum)))
hex.Encode(encodedSum, sum)

if string(encodedSum) != f.expectedSum {
return ErrHashMismatch{
Calculated: string(encodedSum),
Expected: f.expectedSum,
}
}
return nil
}

// newHashedReader returns a new ReadCloser that also writes to the provided hash.
func newHashedReader(reader io.ReadCloser, hasher hash.Hash) io.ReadCloser {
return struct {
io.Reader
io.Closer
}{
Reader: io.TeeReader(reader, hasher),
Closer: reader,
}
}

// RenderFile returns a *File with a Reader that downloads, hashes, and decompresses the incoming data.
// It returns nil if f had invalid options. Errors reading/verifying/decompressing the file will
// present themselves when the Reader is actually read from.
func RenderFile(l *log.Logger, c *resource.HttpClient, f types.File) *File {
var reader io.ReadCloser
var err error
var expectedSum string

fetch := func() error {
contents, err = util.FetchResource(l, url.URL(f.Contents.Source))
return err
}

validate := func() error {
return util.AssertValid(f.Contents.Verification, contents)
}

decompress := func() error {
contents, err = decompressFile(l, f, contents)
return err
}

if l.LogOp(fetch, "fetching file %q", f.Path) != nil {
reader, err = resource.FetchAsReader(l, c, context.Background(), url.URL(f.Contents.Source))
if err != nil {
l.Crit("Error fetching file %q: %v", f.Path, err)
return nil
}
if l.LogOp(validate, "validating file contents") != nil {

fileHash, err := GetHasher(f.Contents.Verification)
if err != nil {
l.Crit("Error verifying file %q: %v", f.Path, err)
return nil
}
if l.LogOp(decompress, "decompressing file contents") != nil {

if fileHash != nil {
reader = newHashedReader(reader, fileHash)
expectedSum = f.Contents.Verification.Hash.Sum
}

reader, err = decompressFileStream(l, f, reader)
if err != nil {
l.Crit("Error decompressing file %q: %v", f.Path, err)
return nil
}

return &File{
Path: f.Path,
Contents: []byte(contents),
Mode: os.FileMode(f.Mode),
Uid: f.User.Id,
Gid: f.Group.Id,
Path: f.Path,
ReadCloser: reader,
Hash: fileHash,
Mode: os.FileMode(f.Mode),
Uid: f.User.Id,
Gid: f.Group.Id,
expectedSum: expectedSum,
}
}
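The RenderFile rewrite above moves from buffering whole files to streaming them: the fetched stream is wrapped in a TeeReader that feeds a hash as bytes flow through, and the recorded sum is checked only after the stream has been fully consumed. A small self-contained sketch of that tee-and-verify pattern follows; the payload and expected digest are invented for the example.

// Sketch of hash-while-streaming via io.TeeReader; not the vendored code.
package main

import (
	"bytes"
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	payload := []byte("example file contents") // hypothetical download
	expected := sha512.Sum512(payload)

	hasher := sha512.New()
	// Every byte read from reader is also written into hasher.
	reader := io.TeeReader(bytes.NewReader(payload), hasher)

	// Consuming the stream (here: discarding it) is what populates the hash.
	if _, err := io.Copy(ioutil.Discard, reader); err != nil {
		fmt.Println("read failed:", err)
		return
	}

	calculated := hex.EncodeToString(hasher.Sum(nil))
	fmt.Println("match:", calculated == hex.EncodeToString(expected[:]))
}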
func decompressFile(l *log.Logger, f types.File, contents []byte) ([]byte, error) {
// gzipReader is a wrapper for gzip's reader that closes the stream it wraps as well
// as itself when Close() is called.
type gzipReader struct {
*gzip.Reader //actually a ReadCloser
source io.Closer
}

func newGzipReader(reader io.ReadCloser) (io.ReadCloser, error) {
gzReader, err := gzip.NewReader(reader)
if err != nil {
return nil, err
}
return gzipReader{
Reader: gzReader,
source: reader,
}, nil
}

func (gz gzipReader) Close() error {
if err := gz.Reader.Close(); err != nil {
return err
}
if err := gz.source.Close(); err != nil {
return err
}
return nil
}

func decompressFileStream(l *log.Logger, f types.File, contents io.ReadCloser) (io.ReadCloser, error) {
switch f.Contents.Compression {
case "":
return contents, nil
case "gzip":
reader, err := gzip.NewReader(bytes.NewReader(contents))
if err != nil {
return nil, err
}
defer reader.Close()

return ioutil.ReadAll(reader)
return newGzipReader(contents)
default:
return nil, types.ErrCompressionInvalid
}
}
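The gzipReader wrapper above exists because closing a gzip.Reader does not close the stream it decompresses, so the wrapper closes both. A brief sketch of that wrap-and-close-both pattern, using an in-memory gzip payload as a stand-in for a downloaded stream:

// Sketch of wrapping gzip.Reader so Close() also closes the source stream.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
)

type gzipReadCloser struct {
	*gzip.Reader           // provides Read (and its own Close)
	source       io.Closer // the stream being decompressed
}

func (g gzipReadCloser) Close() error {
	if err := g.Reader.Close(); err != nil {
		return err
	}
	return g.source.Close()
}

func main() {
	// Build a small gzip payload in memory as a stand-in for a download.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello, streamed world"))
	zw.Close()

	source := ioutil.NopCloser(&buf)
	zr, err := gzip.NewReader(source)
	if err != nil {
		panic(err)
	}
	rc := gzipReadCloser{Reader: zr, source: source}
	defer rc.Close()

	out, _ := ioutil.ReadAll(rc)
	fmt.Println(string(out))
}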
// WriteFile creates and writes the file described by f using the provided context
// WriteFile creates and writes the file described by f using the provided context.
func (u Util) WriteFile(f *File) error {
defer f.Close()
var err error

path := u.JoinPath(string(f.Path))
@@ -109,14 +172,22 @@ func (u Util) WriteFile(f *File) error {
if tmp, err = ioutil.TempFile(filepath.Dir(path), "tmp"); err != nil {
return err
}
tmp.Close()

defer func() {
tmp.Close()
if err != nil {
os.Remove(tmp.Name())
}
}()

if err := ioutil.WriteFile(tmp.Name(), f.Contents, f.Mode); err != nil {
fileWriter := bufio.NewWriter(tmp)

if _, err = io.Copy(fileWriter, f); err != nil {
return err
}
fileWriter.Flush()

if err = f.Verify(); err != nil {
return err
}

@@ -124,22 +195,22 @@ func (u Util) WriteFile(f *File) error {
// by using syscall.Fchown() and syscall.Fchmod()

// Ensure the ownership and mode are as requested (since WriteFile can be affected by sticky bit)
if err := os.Chown(tmp.Name(), f.Uid, f.Gid); err != nil {
if err = os.Chown(tmp.Name(), f.Uid, f.Gid); err != nil {
return err
}

if err := os.Chmod(tmp.Name(), f.Mode); err != nil {
if err = os.Chmod(tmp.Name(), f.Mode); err != nil {
return err
}

if err := os.Rename(tmp.Name(), path); err != nil {
if err = os.Rename(tmp.Name(), path); err != nil {
return err
}

return nil
}

// mkdirForFile helper creates the directory components of path
// mkdirForFile helper creates the directory components of path.
func mkdirForFile(path string) error {
return os.MkdirAll(filepath.Dir(path), DefaultDirectoryPermissions)
}
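WriteFile above now copies the already hashed and decompressed stream into a temporary file, verifies the checksum, fixes ownership and mode, and only then renames the temp file into place, so a failed or corrupt fetch never leaves a partial file at the destination. A rough sketch of that write-then-rename flow follows; the destination path and contents are made up, and chown is omitted since it requires privileges.

// Sketch of the temp-file + verify + rename flow; paths and contents are made up.
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

func writeAtomically(path string, contents io.Reader, mode os.FileMode) (err error) {
	tmp, err := ioutil.TempFile(filepath.Dir(path), "tmp")
	if err != nil {
		return err
	}
	defer func() {
		tmp.Close()
		if err != nil {
			os.Remove(tmp.Name()) // never leave a partial file behind
		}
	}()

	if _, err = io.Copy(tmp, contents); err != nil {
		return err
	}
	// A checksum verification step would run here, before the rename.
	if err = os.Chmod(tmp.Name(), mode); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	dir, _ := ioutil.TempDir("", "write-demo")
	defer os.RemoveAll(dir)

	dst := filepath.Join(dir, "example.conf")
	err := writeAtomically(dst, bytes.NewReader([]byte("key=value\n")), 0644)
	fmt.Println(dst, err)
}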
20
vendor/github.com/coreos/ignition/internal/exec/util/unit.go
generated
vendored
@@ -15,7 +15,9 @@
package util

import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"

@@ -29,25 +31,25 @@ const (

func FileFromSystemdUnit(unit types.SystemdUnit) *File {
return &File{
Path: types.Path(filepath.Join(SystemdUnitsPath(), string(unit.Name))),
Contents: []byte(unit.Contents),
Mode: DefaultFilePermissions,
Path: types.Path(filepath.Join(SystemdUnitsPath(), string(unit.Name))),
ReadCloser: ioutil.NopCloser(bytes.NewReader([]byte(unit.Contents))),
Mode: DefaultFilePermissions,
}
}

func FileFromNetworkdUnit(unit types.NetworkdUnit) *File {
return &File{
Path: types.Path(filepath.Join(NetworkdUnitsPath(), string(unit.Name))),
Contents: []byte(unit.Contents),
Mode: DefaultFilePermissions,
Path: types.Path(filepath.Join(NetworkdUnitsPath(), string(unit.Name))),
ReadCloser: ioutil.NopCloser(bytes.NewReader([]byte(unit.Contents))),
Mode: DefaultFilePermissions,
}
}

func FileFromUnitDropin(unit types.SystemdUnit, dropin types.SystemdUnitDropIn) *File {
return &File{
Path: types.Path(filepath.Join(SystemdDropinsPath(string(unit.Name)), string(dropin.Name))),
Contents: []byte(dropin.Contents),
Mode: DefaultFilePermissions,
Path: types.Path(filepath.Join(SystemdDropinsPath(string(unit.Name)), string(dropin.Name))),
ReadCloser: ioutil.NopCloser(bytes.NewReader([]byte(dropin.Contents))),
Mode: DefaultFilePermissions,
}
}
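Because File now carries an io.ReadCloser rather than a byte slice, unit and drop-in contents that already live in memory are adapted with ioutil.NopCloser around a bytes.Reader, as the hunks above show. A tiny sketch of that adapter, with made-up unit text:

// Sketch of adapting in-memory contents to an io.ReadCloser with NopCloser.
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	unitContents := "[Service]\nExecStart=/usr/bin/true\n" // hypothetical unit

	// NopCloser adds a no-op Close so in-memory data satisfies io.ReadCloser,
	// the same interface streamed downloads are consumed through.
	var rc io.ReadCloser = ioutil.NopCloser(bytes.NewReader([]byte(unitContents)))
	defer rc.Close()

	out, _ := ioutil.ReadAll(rc)
	fmt.Print(string(out))
}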
Some files were not shown because too many files have changed in this diff.